fs/ocfs2/uptodate.c

/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * uptodate.c
 *
 * Tracking the up-to-date-ness of a local buffer_head with respect to
 * the cluster.
 *
 * Copyright (C) 2002, 2004, 2005 Oracle. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 *
 * Standard buffer head caching flags (uptodate, etc) are insufficient
 * in a clustered environment - a buffer may be marked up to date on
 * our local node but could have been modified by another cluster
 * member. As a result an additional (and performant) caching scheme
 * is required. A further requirement is that we consume as little
 * memory as possible - we never pin buffer_head structures in order
 * to cache them.
 *
 * We track the existence of up to date buffers on the inodes which
 * are associated with them. Because we don't want to pin
 * buffer_heads, this is only a (strong) hint and several other checks
 * are made in the I/O path to ensure that we don't use a stale or
 * invalid buffer without going to disk:
 *   - buffer_jbd is used liberally - if a bh is in the journal on
 *     this node then it *must* be up to date.
 *   - the standard buffer_uptodate() macro is used to detect buffers
 *     which may be invalid (even if we have an up to date tracking
 *     item for them)
 *
 * For a full understanding of how this code works together, one
 * should read the callers in dlmglue.c, the I/O functions in
 * buffer_head_io.c and ocfs2_journal_access in journal.c
 */

#include <linux/fs.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/buffer_head.h>
#include <linux/rbtree.h>

#include <cluster/masklog.h>

#include "ocfs2.h"

#include "inode.h"
#include "uptodate.h"
#include "ocfs2_trace.h"

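/*
 * One tracking item per cached metadata block.  Items are only
 * allocated once a cache has outgrown its inline array and been
 * converted to an rb-tree (see ocfs2_expand_cache()).
 */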
struct ocfs2_meta_cache_item {
    struct rb_node c_node;
    sector_t c_block;
};

static struct kmem_cache *ocfs2_uptodate_cachep = NULL;

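/*
 * The helpers below simply delegate to the owner's
 * ocfs2_caching_operations, which keeps this code independent of
 * whether the cache hangs off an inode or some other metadata
 * container.
 */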
u64 ocfs2_metadata_cache_owner(struct ocfs2_caching_info *ci)
{
    BUG_ON(!ci || !ci->ci_ops);

    return ci->ci_ops->co_owner(ci);
}

struct super_block *ocfs2_metadata_cache_get_super(struct ocfs2_caching_info *ci)
{
    BUG_ON(!ci || !ci->ci_ops);

    return ci->ci_ops->co_get_super(ci);
}

static void ocfs2_metadata_cache_lock(struct ocfs2_caching_info *ci)
{
    BUG_ON(!ci || !ci->ci_ops);

    ci->ci_ops->co_cache_lock(ci);
}

static void ocfs2_metadata_cache_unlock(struct ocfs2_caching_info *ci)
{
    BUG_ON(!ci || !ci->ci_ops);

    ci->ci_ops->co_cache_unlock(ci);
}

void ocfs2_metadata_cache_io_lock(struct ocfs2_caching_info *ci)
{
    BUG_ON(!ci || !ci->ci_ops);

    ci->ci_ops->co_io_lock(ci);
}

void ocfs2_metadata_cache_io_unlock(struct ocfs2_caching_info *ci)
{
    BUG_ON(!ci || !ci->ci_ops);

    ci->ci_ops->co_io_unlock(ci);
}

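/*
 * Return the cache to its empty, inline-array state.  When 'clear' is
 * set, the tracked journal transaction ids are reset as well; purging
 * alone (ocfs2_metadata_cache_purge()) leaves them untouched.
 */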
static void ocfs2_metadata_cache_reset(struct ocfs2_caching_info *ci,
                       int clear)
{
    ci->ci_flags |= OCFS2_CACHE_FL_INLINE;
    ci->ci_num_cached = 0;

    if (clear) {
        ci->ci_created_trans = 0;
        ci->ci_last_trans = 0;
    }
}

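/*
 * Attach the caching operations and start with an empty cache.  A
 * cache set up here should be torn down with
 * ocfs2_metadata_cache_exit(), which also purges anything still
 * cached.
 */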
void ocfs2_metadata_cache_init(struct ocfs2_caching_info *ci,
                   const struct ocfs2_caching_operations *ops)
{
    BUG_ON(!ops);

    ci->ci_ops = ops;
    ocfs2_metadata_cache_reset(ci, 1);
}

void ocfs2_metadata_cache_exit(struct ocfs2_caching_info *ci)
{
    ocfs2_metadata_cache_purge(ci);
    ocfs2_metadata_cache_reset(ci, 1);
}


/* No lock taken here as 'root' is not expected to be visible to other
 * processes. */
static unsigned int ocfs2_purge_copied_metadata_tree(struct rb_root *root)
{
    unsigned int purged = 0;
    struct rb_node *node;
    struct ocfs2_meta_cache_item *item;

    while ((node = rb_last(root)) != NULL) {
        item = rb_entry(node, struct ocfs2_meta_cache_item, c_node);

        trace_ocfs2_purge_copied_metadata_tree(
                    (unsigned long long) item->c_block);

        rb_erase(&item->c_node, root);
        kmem_cache_free(ocfs2_uptodate_cachep, item);

        purged++;
    }
    return purged;
}

/* Called from locking and called from ocfs2_clear_inode. Dump the
 * cache for a given inode.
 *
 * This function is a few more lines longer than necessary due to some
 * accounting done here, but I think it's worth tracking down those
 * bugs sooner -- Mark */
void ocfs2_metadata_cache_purge(struct ocfs2_caching_info *ci)
{
    unsigned int tree, to_purge, purged;
    struct rb_root root = RB_ROOT;

    BUG_ON(!ci || !ci->ci_ops);

    ocfs2_metadata_cache_lock(ci);
    tree = !(ci->ci_flags & OCFS2_CACHE_FL_INLINE);
    to_purge = ci->ci_num_cached;

    trace_ocfs2_metadata_cache_purge(
        (unsigned long long)ocfs2_metadata_cache_owner(ci),
        to_purge, tree);

    /* If we're a tree, save off the root so that we can safely
     * initialize the cache. We do the work to free tree members
     * without the spinlock. */
    if (tree)
        root = ci->ci_cache.ci_tree;

    ocfs2_metadata_cache_reset(ci, 0);
    ocfs2_metadata_cache_unlock(ci);

    purged = ocfs2_purge_copied_metadata_tree(&root);
    /* If possible, track the number wiped so that we can more
     * easily detect counting errors. Unfortunately, this is only
     * meaningful for trees. */
    if (tree && purged != to_purge)
        mlog(ML_ERROR, "Owner %llu, count = %u, purged = %u\n",
             (unsigned long long)ocfs2_metadata_cache_owner(ci),
             to_purge, purged);
}

/* Returns the index in the cache array, -1 if not found.
 * Requires ip_lock. */
static int ocfs2_search_cache_array(struct ocfs2_caching_info *ci,
                    sector_t item)
{
    int i;

    for (i = 0; i < ci->ci_num_cached; i++) {
        if (item == ci->ci_cache.ci_array[i])
            return i;
    }

    return -1;
}

/* Returns the cache item if found, otherwise NULL.
 * Requires ip_lock. */
static struct ocfs2_meta_cache_item *
ocfs2_search_cache_tree(struct ocfs2_caching_info *ci,
            sector_t block)
{
    struct rb_node *n = ci->ci_cache.ci_tree.rb_node;
    struct ocfs2_meta_cache_item *item = NULL;

    while (n) {
        item = rb_entry(n, struct ocfs2_meta_cache_item, c_node);

        if (block < item->c_block)
            n = n->rb_left;
        else if (block > item->c_block)
            n = n->rb_right;
        else
            return item;
    }

    return NULL;
}

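/*
 * Check whether a block is tracked for this owner, searching either
 * the inline array or the rb-tree depending on how the cache is
 * currently stored.  Takes and drops co_cache_lock() internally.
 */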
static int ocfs2_buffer_cached(struct ocfs2_caching_info *ci,
                   struct buffer_head *bh)
{
    int index = -1;
    struct ocfs2_meta_cache_item *item = NULL;

    ocfs2_metadata_cache_lock(ci);

    trace_ocfs2_buffer_cached_begin(
        (unsigned long long)ocfs2_metadata_cache_owner(ci),
        (unsigned long long) bh->b_blocknr,
        !!(ci->ci_flags & OCFS2_CACHE_FL_INLINE));

    if (ci->ci_flags & OCFS2_CACHE_FL_INLINE)
        index = ocfs2_search_cache_array(ci, bh->b_blocknr);
    else
        item = ocfs2_search_cache_tree(ci, bh->b_blocknr);

    ocfs2_metadata_cache_unlock(ci);

    trace_ocfs2_buffer_cached_end(index, item);

    return (index != -1) || (item != NULL);
}

/* Warning: even if it returns true, this does *not* guarantee that
 * the block is stored in our inode metadata cache.
 *
 * This can be called under lock_buffer()
 */
int ocfs2_buffer_uptodate(struct ocfs2_caching_info *ci,
              struct buffer_head *bh)
{
    /* Doesn't matter if the bh is in our cache or not -- if it's
     * not marked uptodate then we know it can't have correct
     * data. */
    if (!buffer_uptodate(bh))
        return 0;

    /* OCFS2 does not allow multiple nodes to be changing the same
     * block at the same time. */
    if (buffer_jbd(bh))
        return 1;

    /* Ok, locally the buffer is marked as up to date, now search
     * our cache to see if we can trust that. */
    return ocfs2_buffer_cached(ci, bh);
}

/*
 * Determine whether a buffer is currently out on a read-ahead request.
 * co_io_lock() should be held to serialize submitters with the logic here.
 */
int ocfs2_buffer_read_ahead(struct ocfs2_caching_info *ci,
                struct buffer_head *bh)
{
    return buffer_locked(bh) && ocfs2_buffer_cached(ci, bh);
}

/* Requires ip_lock */
static void ocfs2_append_cache_array(struct ocfs2_caching_info *ci,
                     sector_t block)
{
    BUG_ON(ci->ci_num_cached >= OCFS2_CACHE_INFO_MAX_ARRAY);

    trace_ocfs2_append_cache_array(
        (unsigned long long)ocfs2_metadata_cache_owner(ci),
        (unsigned long long)block, ci->ci_num_cached);

    ci->ci_cache.ci_array[ci->ci_num_cached] = block;
    ci->ci_num_cached++;
}

/* By now the caller should have checked that the item does *not*
 * exist in the tree.
 * Requires ip_lock. */
static void __ocfs2_insert_cache_tree(struct ocfs2_caching_info *ci,
                      struct ocfs2_meta_cache_item *new)
{
    sector_t block = new->c_block;
    struct rb_node *parent = NULL;
    struct rb_node **p = &ci->ci_cache.ci_tree.rb_node;
    struct ocfs2_meta_cache_item *tmp;

    trace_ocfs2_insert_cache_tree(
        (unsigned long long)ocfs2_metadata_cache_owner(ci),
        (unsigned long long)block, ci->ci_num_cached);

    while (*p) {
        parent = *p;

        tmp = rb_entry(parent, struct ocfs2_meta_cache_item, c_node);

        if (block < tmp->c_block)
            p = &(*p)->rb_left;
        else if (block > tmp->c_block)
            p = &(*p)->rb_right;
        else {
            /* This should never happen! */
            mlog(ML_ERROR, "Duplicate block %llu cached!\n",
                 (unsigned long long) block);
            BUG();
        }
    }

    rb_link_node(&new->c_node, parent, p);
    rb_insert_color(&new->c_node, &ci->ci_cache.ci_tree);
    ci->ci_num_cached++;
}

/* co_cache_lock() must be held */
static inline int ocfs2_insert_can_use_array(struct ocfs2_caching_info *ci)
{
    return (ci->ci_flags & OCFS2_CACHE_FL_INLINE) &&
        (ci->ci_num_cached < OCFS2_CACHE_INFO_MAX_ARRAY);
}

/* tree should be exactly OCFS2_CACHE_INFO_MAX_ARRAY wide. NULL the
 * pointers in tree after we use them - this allows the caller to detect
 * when to free in case of error.
 *
 * The co_cache_lock() must be held. */
static void ocfs2_expand_cache(struct ocfs2_caching_info *ci,
                   struct ocfs2_meta_cache_item **tree)
{
    int i;

    mlog_bug_on_msg(ci->ci_num_cached != OCFS2_CACHE_INFO_MAX_ARRAY,
            "Owner %llu, num cached = %u, should be %u\n",
            (unsigned long long)ocfs2_metadata_cache_owner(ci),
            ci->ci_num_cached, OCFS2_CACHE_INFO_MAX_ARRAY);
    mlog_bug_on_msg(!(ci->ci_flags & OCFS2_CACHE_FL_INLINE),
            "Owner %llu not marked as inline anymore!\n",
            (unsigned long long)ocfs2_metadata_cache_owner(ci));

    /* Be careful to initialize the tree members *first* because
     * once the ci_tree is used, the array is junk... */
    for (i = 0; i < OCFS2_CACHE_INFO_MAX_ARRAY; i++)
        tree[i]->c_block = ci->ci_cache.ci_array[i];

    ci->ci_flags &= ~OCFS2_CACHE_FL_INLINE;
    ci->ci_cache.ci_tree = RB_ROOT;
    /* this will be set again by __ocfs2_insert_cache_tree */
    ci->ci_num_cached = 0;

    for (i = 0; i < OCFS2_CACHE_INFO_MAX_ARRAY; i++) {
        __ocfs2_insert_cache_tree(ci, tree[i]);
        tree[i] = NULL;
    }

    trace_ocfs2_expand_cache(
        (unsigned long long)ocfs2_metadata_cache_owner(ci),
        ci->ci_flags, ci->ci_num_cached);
}

/* Slow path function - memory allocation is necessary. See the
 * comment above ocfs2_set_buffer_uptodate for more information. */
static void __ocfs2_set_buffer_uptodate(struct ocfs2_caching_info *ci,
                    sector_t block,
                    int expand_tree)
{
    int i;
    struct ocfs2_meta_cache_item *new = NULL;
    struct ocfs2_meta_cache_item *tree[OCFS2_CACHE_INFO_MAX_ARRAY] =
        { NULL, };

    trace_ocfs2_set_buffer_uptodate(
        (unsigned long long)ocfs2_metadata_cache_owner(ci),
        (unsigned long long)block, expand_tree);

    new = kmem_cache_alloc(ocfs2_uptodate_cachep, GFP_NOFS);
    if (!new) {
        mlog_errno(-ENOMEM);
        return;
    }
    new->c_block = block;

    if (expand_tree) {
        /* Do *not* allocate an array here - the removal code
         * has no way of tracking that. */
        for (i = 0; i < OCFS2_CACHE_INFO_MAX_ARRAY; i++) {
            tree[i] = kmem_cache_alloc(ocfs2_uptodate_cachep,
                           GFP_NOFS);
            if (!tree[i]) {
                mlog_errno(-ENOMEM);
                goto out_free;
            }

            /* These are initialized in ocfs2_expand_cache! */
        }
    }

    ocfs2_metadata_cache_lock(ci);
    if (ocfs2_insert_can_use_array(ci)) {
        /* Ok, items were removed from the cache in between
         * locks. Detect this and revert back to the fast path */
        ocfs2_append_cache_array(ci, block);
        ocfs2_metadata_cache_unlock(ci);
        goto out_free;
    }

    if (expand_tree)
        ocfs2_expand_cache(ci, tree);

    __ocfs2_insert_cache_tree(ci, new);
    ocfs2_metadata_cache_unlock(ci);

    new = NULL;
out_free:
    if (new)
        kmem_cache_free(ocfs2_uptodate_cachep, new);

    /* If these were used, then ocfs2_expand_cache re-set them to
     * NULL for us. */
    if (tree[0]) {
        for (i = 0; i < OCFS2_CACHE_INFO_MAX_ARRAY; i++)
            if (tree[i])
                kmem_cache_free(ocfs2_uptodate_cachep,
                        tree[i]);
    }
}

/* Item insertion is guarded by co_io_lock(), so the insertion path takes
 * advantage of this by not rechecking for a duplicate insert during
 * the slow case. Additionally, if the cache needs to be bumped up to
 * a tree, the code will not recheck after acquiring the lock --
 * multiple paths cannot be expanding to a tree at the same time.
 *
 * The slow path takes into account that items can be removed
 * (including the whole tree wiped and reset) while this process is out
 * allocating memory. In those cases, it reverts to the fast path.
 *
 * Note that this function may fail to insert the block if memory
 * cannot be allocated. This is not fatal; it may, however, result in a
 * performance penalty.
 *
 * Readahead buffers can be passed in here before the I/O request is
 * completed.
 */
void ocfs2_set_buffer_uptodate(struct ocfs2_caching_info *ci,
                   struct buffer_head *bh)
{
    int expand;

    /* The block may very well exist in our cache already, so avoid
     * doing any more work in that case. */
    if (ocfs2_buffer_cached(ci, bh))
        return;

    trace_ocfs2_set_buffer_uptodate_begin(
        (unsigned long long)ocfs2_metadata_cache_owner(ci),
        (unsigned long long)bh->b_blocknr);

    /* No need to recheck under spinlock - insertion is guarded by
     * co_io_lock() */
    ocfs2_metadata_cache_lock(ci);
    if (ocfs2_insert_can_use_array(ci)) {
        /* Fast case - it's an array and there's a free
         * spot. */
        ocfs2_append_cache_array(ci, bh->b_blocknr);
        ocfs2_metadata_cache_unlock(ci);
        return;
    }

    expand = 0;
    if (ci->ci_flags & OCFS2_CACHE_FL_INLINE) {
        /* We need to bump things up to a tree. */
        expand = 1;
    }
    ocfs2_metadata_cache_unlock(ci);

    __ocfs2_set_buffer_uptodate(ci, bh->b_blocknr, expand);
}

/* Called against a newly allocated buffer. Most likely nobody should
 * be able to read this sort of metadata while it's still being
 * allocated, but this is careful to take co_io_lock() anyway. */
void ocfs2_set_new_buffer_uptodate(struct ocfs2_caching_info *ci,
                   struct buffer_head *bh)
{
    /* This should definitely *not* exist in our cache */
    BUG_ON(ocfs2_buffer_cached(ci, bh));

    set_buffer_uptodate(bh);

    ocfs2_metadata_cache_io_lock(ci);
    ocfs2_set_buffer_uptodate(ci, bh);
    ocfs2_metadata_cache_io_unlock(ci);
}

/* Requires ip_lock. */
static void ocfs2_remove_metadata_array(struct ocfs2_caching_info *ci,
                    int index)
{
    sector_t *array = ci->ci_cache.ci_array;
    int bytes;

    BUG_ON(index < 0 || index >= OCFS2_CACHE_INFO_MAX_ARRAY);
    BUG_ON(index >= ci->ci_num_cached);
    BUG_ON(!ci->ci_num_cached);

    trace_ocfs2_remove_metadata_array(
        (unsigned long long)ocfs2_metadata_cache_owner(ci),
        index, ci->ci_num_cached);

    ci->ci_num_cached--;

    /* don't need to copy if the array is now empty, or if we
     * removed at the tail */
    if (ci->ci_num_cached && index < ci->ci_num_cached) {
        bytes = sizeof(sector_t) * (ci->ci_num_cached - index);
        memmove(&array[index], &array[index + 1], bytes);
    }
}

/* Requires ip_lock. */
static void ocfs2_remove_metadata_tree(struct ocfs2_caching_info *ci,
                       struct ocfs2_meta_cache_item *item)
{
    trace_ocfs2_remove_metadata_tree(
        (unsigned long long)ocfs2_metadata_cache_owner(ci),
        (unsigned long long)item->c_block);

    rb_erase(&item->c_node, &ci->ci_cache.ci_tree);
    ci->ci_num_cached--;
}

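/*
 * Drop a single block from the cache, whichever representation it
 * currently lives in.  Freeing a tree item is deferred until after the
 * lock has been dropped.
 */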
static void ocfs2_remove_block_from_cache(struct ocfs2_caching_info *ci,
                      sector_t block)
{
    int index;
    struct ocfs2_meta_cache_item *item = NULL;

    ocfs2_metadata_cache_lock(ci);
    trace_ocfs2_remove_block_from_cache(
        (unsigned long long)ocfs2_metadata_cache_owner(ci),
        (unsigned long long) block, ci->ci_num_cached,
        ci->ci_flags);

    if (ci->ci_flags & OCFS2_CACHE_FL_INLINE) {
        index = ocfs2_search_cache_array(ci, block);
        if (index != -1)
            ocfs2_remove_metadata_array(ci, index);
    } else {
        item = ocfs2_search_cache_tree(ci, block);
        if (item)
            ocfs2_remove_metadata_tree(ci, item);
    }
    ocfs2_metadata_cache_unlock(ci);

    if (item)
        kmem_cache_free(ocfs2_uptodate_cachep, item);
}

/*
 * Called when we remove a chunk of metadata from an inode. We don't
 * bother reverting things to an inlined array in the case of a remove
 * which moves us back under the limit.
 */
void ocfs2_remove_from_cache(struct ocfs2_caching_info *ci,
                 struct buffer_head *bh)
{
    sector_t block = bh->b_blocknr;

    ocfs2_remove_block_from_cache(ci, block);
}

/* Called when we remove xattr clusters from an inode. */
void ocfs2_remove_xattr_clusters_from_cache(struct ocfs2_caching_info *ci,
                        sector_t block,
                        u32 c_len)
{
    struct super_block *sb = ocfs2_metadata_cache_get_super(ci);
    unsigned int i, b_len = ocfs2_clusters_to_blocks(sb, 1) * c_len;

    for (i = 0; i < b_len; i++, block++)
        ocfs2_remove_block_from_cache(ci, block);
}

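/*
 * Create the slab cache backing ocfs2_meta_cache_item allocations.
 * Called from the module init path; exit_ocfs2_uptodate_cache() tears
 * it down again on unload.
 */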
int __init init_ocfs2_uptodate_cache(void)
{
    ocfs2_uptodate_cachep = kmem_cache_create("ocfs2_uptodate",
                  sizeof(struct ocfs2_meta_cache_item),
                  0, SLAB_HWCACHE_ALIGN, NULL);
    if (!ocfs2_uptodate_cachep)
        return -ENOMEM;

    return 0;
}

void exit_ocfs2_uptodate_cache(void)
{
    if (ocfs2_uptodate_cachep)
        kmem_cache_destroy(ocfs2_uptodate_cachep);
}

