/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * io.c
 *
 * Buffer cache handling
 *
 * Copyright (C) 2002, 2004 Oracle. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
25
26#include <linux/fs.h>
27#include <linux/types.h>
28#include <linux/highmem.h>
29
30#include <cluster/masklog.h>
31
32#include "ocfs2.h"
33
34#include "alloc.h"
35#include "inode.h"
36#include "journal.h"
37#include "uptodate.h"
38
39#include "buffer_head_io.h"
40
/*
 * Bits on bh->b_state used by ocfs2.
 *
 * These MUST be after the JBD2 bits. Hence, we use BH_JBDPrivateStart.
 */
enum ocfs2_state_bits {
	BH_NeedsValidate = BH_JBDPrivateStart,
};

/* Expand the magic b_state functions. This generates the
 * set_buffer_needs_validate(), clear_buffer_needs_validate() and
 * buffer_needs_validate() helpers used below. */
BUFFER_FNS(NeedsValidate, needs_validate);

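/*
 * ocfs2_write_block() writes a single buffer straight to disk,
 * bypassing the journal, and waits for the I/O to finish. On success
 * the buffer is also marked uptodate in the metadata cache tracked
 * by 'ci'.
 */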
int ocfs2_write_block(struct ocfs2_super *osb, struct buffer_head *bh,
		      struct ocfs2_caching_info *ci)
{
	int ret = 0;

	mlog_entry("(bh->b_blocknr = %llu, ci=%p)\n",
		   (unsigned long long)bh->b_blocknr, ci);

	BUG_ON(bh->b_blocknr < OCFS2_SUPER_BLOCK_BLKNO);
	BUG_ON(buffer_jbd(bh));

	/* No need to check for a soft readonly file system here. Non-
	 * journalled writes are only ever done on system files which
	 * can get modified during recovery even if read-only. */
	if (ocfs2_is_hard_readonly(osb)) {
		ret = -EROFS;
		goto out;
	}

	ocfs2_metadata_cache_io_lock(ci);

	lock_buffer(bh);
	set_buffer_uptodate(bh);

	/* remove from dirty list before I/O. */
	clear_buffer_dirty(bh);

	get_bh(bh); /* for end_buffer_write_sync() */
	bh->b_end_io = end_buffer_write_sync;
	submit_bh(WRITE, bh);

	wait_on_buffer(bh);

	if (buffer_uptodate(bh)) {
		ocfs2_set_buffer_uptodate(ci, bh);
	} else {
		/* We don't need to remove the clustered uptodate
		 * information for this bh as it's not marked locally
		 * uptodate. */
		ret = -EIO;
		put_bh(bh);
	}

	ocfs2_metadata_cache_io_unlock(ci);
out:
	mlog_exit(ret);
	return ret;
}

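/*
 * ocfs2_read_blocks_sync() reads 'nr' blocks starting at 'block'
 * straight from disk, without consulting the uptodate cache. NULL
 * slots in bhs[] are filled via sb_getblk(). Buffers owned by the
 * journal or still marked dirty are skipped rather than re-read.
 */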
int ocfs2_read_blocks_sync(struct ocfs2_super *osb, u64 block,
			   unsigned int nr, struct buffer_head *bhs[])
{
	int status = 0;
	unsigned int i;
	struct buffer_head *bh;

	if (!nr) {
		mlog(ML_BH_IO, "No buffers will be read!\n");
		goto bail;
	}

	for (i = 0 ; i < nr ; i++) {
		if (bhs[i] == NULL) {
			bhs[i] = sb_getblk(osb->sb, block++);
			if (bhs[i] == NULL) {
				status = -EIO;
				mlog_errno(status);
				goto bail;
			}
		}
		bh = bhs[i];

		if (buffer_jbd(bh)) {
			mlog(ML_BH_IO,
			     "trying to sync read a jbd "
			     "managed bh (blocknr = %llu), skipping\n",
			     (unsigned long long)bh->b_blocknr);
			continue;
		}

		if (buffer_dirty(bh)) {
			/* This should probably be a BUG, or
			 * at least return an error. */
			mlog(ML_ERROR,
			     "trying to sync read a dirty "
			     "buffer! (blocknr = %llu), skipping\n",
			     (unsigned long long)bh->b_blocknr);
			continue;
		}

		lock_buffer(bh);
		if (buffer_jbd(bh)) {
			mlog(ML_ERROR,
			     "block %llu had the JBD bit set "
			     "while I was in lock_buffer!",
			     (unsigned long long)bh->b_blocknr);
			BUG();
		}

		clear_buffer_uptodate(bh);
		get_bh(bh); /* for end_buffer_read_sync() */
		bh->b_end_io = end_buffer_read_sync;
		submit_bh(READ, bh);
	}

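	/* Wait for the reads issued above. Buffers left to the journal
	 * were never submitted here, so they are not waited on. */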
	for (i = nr; i > 0; i--) {
		bh = bhs[i - 1];

		/* No need to wait on the buffer if it's managed by JBD. */
		if (!buffer_jbd(bh))
			wait_on_buffer(bh);

		if (!buffer_uptodate(bh)) {
			/* Status won't be cleared from here on out,
			 * so we can safely record this and loop back
			 * to clean up the other buffers. */
			status = -EIO;
			put_bh(bh);
			bhs[i - 1] = NULL;
		}
	}

bail:
	return status;
}

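/*
 * ocfs2_read_blocks() is the cache-aware read path. Blocks already
 * uptodate in the cache tracked by 'ci' are not re-read.
 * OCFS2_BH_IGNORE_CACHE forces a disk read; OCFS2_BH_READAHEAD submits
 * the I/O without waiting for completion. An optional 'validate'
 * callback is run on buffers that were actually read from disk;
 * readahead requests defer both the wait and the validation to a
 * later blocking read.
 *
 * A minimal, hypothetical caller ('my_validate' is illustrative only;
 * a NULL bh slot asks us to sb_getblk() it) might look like:
 *
 *	struct buffer_head *bh = NULL;
 *	status = ocfs2_read_blocks(INODE_CACHE(inode), blkno, 1, &bh,
 *				   0, my_validate);
 *	if (!status) {
 *		... use bh->b_data, then brelse(bh) ...
 *	}
 */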
int ocfs2_read_blocks(struct ocfs2_caching_info *ci, u64 block, int nr,
		      struct buffer_head *bhs[], int flags,
		      int (*validate)(struct super_block *sb,
				      struct buffer_head *bh))
{
	int status = 0;
	int i, ignore_cache = 0;
	struct buffer_head *bh;
	struct super_block *sb = ocfs2_metadata_cache_get_super(ci);

	mlog_entry("(ci=%p, block=(%llu), nr=(%d), flags=%d)\n",
		   ci, (unsigned long long)block, nr, flags);

	BUG_ON(!ci);
	BUG_ON((flags & OCFS2_BH_READAHEAD) &&
	       (flags & OCFS2_BH_IGNORE_CACHE));

	if (bhs == NULL) {
		status = -EINVAL;
		mlog_errno(status);
		goto bail;
	}

	if (nr < 0) {
		mlog(ML_ERROR, "asked to read %d blocks!\n", nr);
		status = -EINVAL;
		mlog_errno(status);
		goto bail;
	}

	if (nr == 0) {
		mlog(ML_BH_IO, "No buffers will be read!\n");
		status = 0;
		goto bail;
	}

	ocfs2_metadata_cache_io_lock(ci);
	for (i = 0 ; i < nr ; i++) {
		if (bhs[i] == NULL) {
			bhs[i] = sb_getblk(sb, block++);
			if (bhs[i] == NULL) {
				ocfs2_metadata_cache_io_unlock(ci);
				status = -EIO;
				mlog_errno(status);
				goto bail;
			}
		}
		bh = bhs[i];
		ignore_cache = (flags & OCFS2_BH_IGNORE_CACHE);

		/* There are three read-ahead cases here which we need to
		 * be concerned with. All three assume a buffer has
		 * previously been submitted with OCFS2_BH_READAHEAD
		 * and it hasn't yet completed I/O.
		 *
		 * 1) The current request is sync to disk. This rarely
		 * happens these days, and never when performance
		 * matters - the code can just wait on the buffer
		 * lock and re-submit.
		 *
		 * 2) The current request is cached, but not
		 * readahead. ocfs2_buffer_uptodate() will return
		 * false anyway, so we'll wind up waiting on the
		 * buffer lock to do I/O. We re-check the request
		 * after getting the lock to avoid a re-submit.
		 *
		 * 3) The current request is readahead (and so must
		 * also be a caching one). We short circuit if the
		 * buffer is locked (under I/O) and if it's in the
		 * uptodate cache. The re-check from #2 catches the
		 * case that the previous read-ahead completes just
		 * before our is-it-in-flight check.
		 */

		if (!ignore_cache && !ocfs2_buffer_uptodate(ci, bh)) {
			mlog(ML_UPTODATE,
			     "bh (%llu), owner %llu not uptodate\n",
			     (unsigned long long)bh->b_blocknr,
			     (unsigned long long)ocfs2_metadata_cache_owner(ci));
			/* We're using ignore_cache here to say
			 * "go to disk" */
			ignore_cache = 1;
		}

		if (buffer_jbd(bh)) {
			if (ignore_cache)
				mlog(ML_BH_IO, "trying to sync read a jbd "
					       "managed bh (blocknr = %llu)\n",
				     (unsigned long long)bh->b_blocknr);
			continue;
		}

		if (ignore_cache) {
			if (buffer_dirty(bh)) {
				/* This should probably be a BUG, or
				 * at least return an error. */
				mlog(ML_BH_IO, "asking me to sync read a dirty "
					       "buffer! (blocknr = %llu)\n",
				     (unsigned long long)bh->b_blocknr);
				continue;
			}

			/* A read-ahead request was made - if the
			 * buffer is already under read-ahead from a
			 * previously submitted request then we are
			 * done here. */
			if ((flags & OCFS2_BH_READAHEAD)
			    && ocfs2_buffer_read_ahead(ci, bh))
				continue;

			lock_buffer(bh);
			if (buffer_jbd(bh)) {
#ifdef CATCH_BH_JBD_RACES
				mlog(ML_ERROR, "block %llu had the JBD bit set "
					       "while I was in lock_buffer!",
				     (unsigned long long)bh->b_blocknr);
				BUG();
#else
				unlock_buffer(bh);
				continue;
#endif
			}

			/* Re-check ocfs2_buffer_uptodate() as a
			 * previously read-ahead buffer may have
			 * completed I/O while we were waiting for the
			 * buffer lock. */
			if (!(flags & OCFS2_BH_IGNORE_CACHE)
			    && !(flags & OCFS2_BH_READAHEAD)
			    && ocfs2_buffer_uptodate(ci, bh)) {
				unlock_buffer(bh);
				continue;
			}

			clear_buffer_uptodate(bh);
			get_bh(bh); /* for end_buffer_read_sync() */
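			/* If the caller supplied a validate callback,
			 * tag the buffer so that validation runs once
			 * the read completes (see the loop below). */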
			if (validate)
				set_buffer_needs_validate(bh);
			bh->b_end_io = end_buffer_read_sync;
			submit_bh(READ, bh);
			continue;
		}
	}

	status = 0;

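	/* Second pass: wait on the reads issued above (skipped for
	 * readahead), run any deferred validation, and record the
	 * result in the uptodate cache. */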
	for (i = (nr - 1); i >= 0; i--) {
		bh = bhs[i];

		if (!(flags & OCFS2_BH_READAHEAD)) {
			/* We know this can't have changed as we hold the
			 * owner sem. Avoid doing any work on the bh if the
			 * journal has it. */
			if (!buffer_jbd(bh))
				wait_on_buffer(bh);

			if (!buffer_uptodate(bh)) {
				/* Status won't be cleared from here on out,
				 * so we can safely record this and loop back
				 * to clean up the other buffers. Don't need to
				 * remove the clustered uptodate information
				 * for this bh as it's not marked locally
				 * uptodate. */
				status = -EIO;
				put_bh(bh);
				bhs[i] = NULL;
				continue;
			}

			if (buffer_needs_validate(bh)) {
				/* We never set NeedsValidate if the
				 * buffer was held by the journal, so
				 * that better not have changed. */
				BUG_ON(buffer_jbd(bh));
				clear_buffer_needs_validate(bh);
				status = validate(sb, bh);
				if (status) {
					put_bh(bh);
					bhs[i] = NULL;
					continue;
				}
			}
		}

		/* Always set the buffer in the cache, even if it was
		 * a forced read, or read-ahead which hasn't yet
		 * completed. */
		ocfs2_set_buffer_uptodate(ci, bh);
	}
	ocfs2_metadata_cache_io_unlock(ci);

	mlog(ML_BH_IO, "block=(%llu), nr=(%d), cached=%s, flags=0x%x\n",
	     (unsigned long long)block, nr,
	     ((flags & OCFS2_BH_IGNORE_CACHE) || ignore_cache) ? "no" : "yes",
	     flags);

bail:

	mlog_exit(status);
	return status;
}

/* Check whether the blkno is the super block or one of the backups. */
static void ocfs2_check_super_or_backup(struct super_block *sb,
					sector_t blkno)
{
	int i;
	u64 backup_blkno;

	if (blkno == OCFS2_SUPER_BLOCK_BLKNO)
		return;

	for (i = 0; i < OCFS2_MAX_BACKUP_SUPERBLOCKS; i++) {
		backup_blkno = ocfs2_backup_super_blkno(sb, i);
		if (backup_blkno == blkno)
			return;
	}

	BUG();
}

/*
 * Writing the super block and its backups doesn't need to collaborate
 * with the journal, so we don't need to lock ip_io_mutex and ci doesn't
 * need to be passed into this function.
 */
int ocfs2_write_super_or_backup(struct ocfs2_super *osb,
				struct buffer_head *bh)
{
	int ret = 0;

	mlog_entry_void();

	BUG_ON(buffer_jbd(bh));
	ocfs2_check_super_or_backup(osb->sb, bh->b_blocknr);

	if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb)) {
		ret = -EROFS;
		goto out;
	}

	lock_buffer(bh);
	set_buffer_uptodate(bh);

	/* remove from dirty list before I/O. */
	clear_buffer_dirty(bh);

	get_bh(bh); /* for end_buffer_write_sync() */
	bh->b_end_io = end_buffer_write_sync;
	submit_bh(WRITE, bh);

	wait_on_buffer(bh);

	if (!buffer_uptodate(bh)) {
		ret = -EIO;
		put_bh(bh);
	}

out:
	mlog_exit(ret);
	return ret;
}