/*
 * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_types.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir2.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_buf_item.h"
#include "xfs_trans_priv.h"
#include "xfs_error.h"
#include "xfs_rw.h"
#include "xfs_trace.h"


STATIC xfs_buf_t *xfs_trans_buf_item_match(xfs_trans_t *, xfs_buftarg_t *,
                xfs_daddr_t, int);
STATIC xfs_buf_t *xfs_trans_buf_item_match_all(xfs_trans_t *, xfs_buftarg_t *,
                xfs_daddr_t, int);

/*
 * Add the locked buffer to the transaction.
 *
 * The buffer must be locked, and it cannot be associated with any
 * transaction.
 *
 * If the buffer does not yet have a buf log item associated with it,
 * then allocate one for it. Then add the buf item to the transaction.
 */
STATIC void
_xfs_trans_bjoin(
        struct xfs_trans        *tp,
        struct xfs_buf          *bp,
        int                     reset_recur)
{
        struct xfs_buf_log_item *bip;

        ASSERT(XFS_BUF_ISBUSY(bp));
        ASSERT(XFS_BUF_FSPRIVATE2(bp, void *) == NULL);

        /*
         * The xfs_buf_log_item pointer is stored in b_fsprivate. If
         * it doesn't have one yet, then allocate one and initialize it.
         * The checks to see if one is there are in xfs_buf_item_init().
         */
        xfs_buf_item_init(bp, tp->t_mountp);
        bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *);
        ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
        ASSERT(!(bip->bli_format.blf_flags & XFS_BLI_CANCEL));
        ASSERT(!(bip->bli_flags & XFS_BLI_LOGGED));
        if (reset_recur)
                bip->bli_recur = 0;

        /*
         * Take a reference for this transaction on the buf item.
         */
        atomic_inc(&bip->bli_refcount);

        /*
         * Get a log_item_desc to point at the new item.
         */
        (void) xfs_trans_add_item(tp, (xfs_log_item_t *)bip);

        /*
         * Initialize b_fsprivate2 so we can find it with incore_match()
         * in xfs_trans_get_buf() and friends above.
         */
        XFS_BUF_SET_FSPRIVATE2(bp, tp);

}

void
xfs_trans_bjoin(
        struct xfs_trans        *tp,
        struct xfs_buf          *bp)
{
        _xfs_trans_bjoin(tp, bp, 0);
        trace_xfs_trans_bjoin(bp->b_fspriv);
}

/*
 * Get and lock the buffer for the caller if it is not already
 * locked within the given transaction. If it is already locked
 * within the transaction, just increment its lock recursion count
 * and return a pointer to it.
 *
 * Use the fast path function xfs_trans_buf_item_match() or the buffer
 * cache routine incore_match() to find the buffer
 * if it is already owned by this transaction.
 *
 * If we don't already own the buffer, use get_buf() to get it.
 * If it doesn't yet have an associated xfs_buf_log_item structure,
 * then allocate one and add the item to this transaction.
 *
 * If the transaction pointer is NULL, make this just a normal
 * get_buf() call.
 */
xfs_buf_t *
xfs_trans_get_buf(xfs_trans_t   *tp,
                  xfs_buftarg_t *target_dev,
                  xfs_daddr_t   blkno,
                  int           len,
                  uint          flags)
{
        xfs_buf_t               *bp;
        xfs_buf_log_item_t      *bip;

        if (flags == 0)
                flags = XBF_LOCK | XBF_MAPPED;

        /*
         * Default to a normal get_buf() call if the tp is NULL.
         */
        if (tp == NULL)
                return xfs_buf_get(target_dev, blkno, len,
                                   flags | XBF_DONT_BLOCK);

        /*
         * If we find the buffer in the cache with this transaction
         * pointer in its b_fsprivate2 field, then we know we already
         * have it locked. In this case we just increment the lock
         * recursion count and return the buffer to the caller.
         */
        if (tp->t_items.lic_next == NULL) {
                bp = xfs_trans_buf_item_match(tp, target_dev, blkno, len);
        } else {
                bp = xfs_trans_buf_item_match_all(tp, target_dev, blkno, len);
        }
        if (bp != NULL) {
                ASSERT(XFS_BUF_VALUSEMA(bp) <= 0);
                if (XFS_FORCED_SHUTDOWN(tp->t_mountp))
                        XFS_BUF_SUPER_STALE(bp);

                /*
                 * If the buffer is stale then it was binval'ed
                 * since last read. This doesn't matter since the
                 * caller isn't allowed to use the data anyway.
                 */
                else if (XFS_BUF_ISSTALE(bp))
                        ASSERT(!XFS_BUF_ISDELAYWRITE(bp));

                ASSERT(XFS_BUF_FSPRIVATE2(bp, xfs_trans_t *) == tp);
                bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *);
                ASSERT(bip != NULL);
                ASSERT(atomic_read(&bip->bli_refcount) > 0);
                bip->bli_recur++;
                trace_xfs_trans_get_buf_recur(bip);
                return (bp);
        }

        /*
         * We always specify the XBF_DONT_BLOCK flag within a transaction
         * so that get_buf does not try to push out a delayed write buffer
         * which might cause another transaction to take place (if the
         * buffer was delayed alloc). Such recursive transactions can
         * easily deadlock with our current transaction as well as cause
         * us to run out of stack space.
         */
        bp = xfs_buf_get(target_dev, blkno, len, flags | XBF_DONT_BLOCK);
        if (bp == NULL) {
                return NULL;
        }

        ASSERT(!XFS_BUF_GETERROR(bp));

        _xfs_trans_bjoin(tp, bp, 1);
        trace_xfs_trans_get_buf(bp->b_fspriv);
        return (bp);
}
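
/*
 * Illustrative sketch only (not part of the original file): a minimal
 * caller pattern for xfs_trans_get_buf()/xfs_trans_log_buf(), assuming
 * the caller already holds a transaction with a sufficient log
 * reservation.  The helper name and its arguments are hypothetical.
 */
#if 0   /* example only, never compiled */
STATIC void
xfs_example_zero_block(
        xfs_trans_t     *tp,            /* active transaction */
        xfs_buftarg_t   *target,
        xfs_daddr_t     blkno,
        int             len)            /* length in basic blocks */
{
        xfs_buf_t       *bp;

        /* Lock the buffer into the transaction; 0 defaults to XBF_LOCK | XBF_MAPPED. */
        bp = xfs_trans_get_buf(tp, target, blkno, len, 0);
        if (bp == NULL)
                return;

        /* Modify the buffer contents while it is locked in the transaction. */
        memset(XFS_BUF_PTR(bp), 0, BBTOB(len));

        /* Record the modified byte range so it is copied into the log. */
        xfs_trans_log_buf(tp, bp, 0, BBTOB(len) - 1);
}
#endif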

/*
 * Get and lock the superblock buffer of this file system for the
 * given transaction.
 *
 * We don't need to use incore_match() here, because the superblock
 * buffer is a private buffer which we keep a pointer to in the
 * mount structure.
 */
xfs_buf_t *
xfs_trans_getsb(xfs_trans_t     *tp,
                struct xfs_mount *mp,
                int             flags)
{
        xfs_buf_t               *bp;
        xfs_buf_log_item_t      *bip;

        /*
         * Default to just trying to lock the superblock buffer
         * if tp is NULL.
         */
        if (tp == NULL) {
                return (xfs_getsb(mp, flags));
        }

        /*
         * If the superblock buffer already has this transaction
         * pointer in its b_fsprivate2 field, then we know we already
         * have it locked. In this case we just increment the lock
         * recursion count and return the buffer to the caller.
         */
        bp = mp->m_sb_bp;
        if (XFS_BUF_FSPRIVATE2(bp, xfs_trans_t *) == tp) {
                bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t*);
                ASSERT(bip != NULL);
                ASSERT(atomic_read(&bip->bli_refcount) > 0);
                bip->bli_recur++;
                trace_xfs_trans_getsb_recur(bip);
                return (bp);
        }

        bp = xfs_getsb(mp, flags);
        if (bp == NULL)
                return NULL;

        _xfs_trans_bjoin(tp, bp, 1);
        trace_xfs_trans_getsb(bp->b_fspriv);
        return (bp);
}

#ifdef DEBUG
xfs_buftarg_t *xfs_error_target;
int     xfs_do_error;
int     xfs_req_num;
int     xfs_error_mod = 33;
#endif

/*
 * Get and lock the buffer for the caller if it is not already
 * locked within the given transaction. If it has not yet been
 * read in, read it from disk. If it is already locked
 * within the transaction and already read in, just increment its
 * lock recursion count and return a pointer to it.
 *
 * Use the fast path function xfs_trans_buf_item_match() or the buffer
 * cache routine incore_match() to find the buffer
 * if it is already owned by this transaction.
 *
 * If we don't already own the buffer, use read_buf() to get it.
 * If it doesn't yet have an associated xfs_buf_log_item structure,
 * then allocate one and add the item to this transaction.
 *
 * If the transaction pointer is NULL, make this just a normal
 * read_buf() call.
 */
int
xfs_trans_read_buf(
        xfs_mount_t     *mp,
        xfs_trans_t     *tp,
        xfs_buftarg_t   *target,
        xfs_daddr_t     blkno,
        int             len,
        uint            flags,
        xfs_buf_t       **bpp)
{
        xfs_buf_t               *bp;
        xfs_buf_log_item_t      *bip;
        int                     error;

        if (flags == 0)
                flags = XBF_LOCK | XBF_MAPPED;

        /*
         * Default to a normal get_buf() call if the tp is NULL.
         */
        if (tp == NULL) {
                bp = xfs_buf_read(target, blkno, len, flags | XBF_DONT_BLOCK);
                if (!bp)
                        return (flags & XBF_TRYLOCK) ?
                                        EAGAIN : XFS_ERROR(ENOMEM);

                if (XFS_BUF_GETERROR(bp) != 0) {
                        xfs_ioerror_alert("xfs_trans_read_buf", mp,
                                          bp, blkno);
                        error = XFS_BUF_GETERROR(bp);
                        xfs_buf_relse(bp);
                        return error;
                }
#ifdef DEBUG
                if (xfs_do_error) {
                        if (xfs_error_target == target) {
                                if (((xfs_req_num++) % xfs_error_mod) == 0) {
                                        xfs_buf_relse(bp);
                                        cmn_err(CE_DEBUG, "Returning error!\n");
                                        return XFS_ERROR(EIO);
                                }
                        }
                }
#endif
                if (XFS_FORCED_SHUTDOWN(mp))
                        goto shutdown_abort;
                *bpp = bp;
                return 0;
        }

        /*
         * If we find the buffer in the cache with this transaction
         * pointer in its b_fsprivate2 field, then we know we already
         * have it locked. If it is already read in we just increment
         * the lock recursion count and return the buffer to the caller.
         * If the buffer is not yet read in, then we read it in, increment
         * the lock recursion count, and return it to the caller.
         */
        if (tp->t_items.lic_next == NULL) {
                bp = xfs_trans_buf_item_match(tp, target, blkno, len);
        } else {
                bp = xfs_trans_buf_item_match_all(tp, target, blkno, len);
        }
        if (bp != NULL) {
                ASSERT(XFS_BUF_VALUSEMA(bp) <= 0);
                ASSERT(XFS_BUF_FSPRIVATE2(bp, xfs_trans_t *) == tp);
                ASSERT(XFS_BUF_FSPRIVATE(bp, void *) != NULL);
                ASSERT((XFS_BUF_ISERROR(bp)) == 0);
                if (!(XFS_BUF_ISDONE(bp))) {
                        trace_xfs_trans_read_buf_io(bp, _RET_IP_);
                        ASSERT(!XFS_BUF_ISASYNC(bp));
                        XFS_BUF_READ(bp);
                        xfsbdstrat(tp->t_mountp, bp);
                        error = xfs_iowait(bp);
                        if (error) {
                                xfs_ioerror_alert("xfs_trans_read_buf", mp,
                                                  bp, blkno);
                                xfs_buf_relse(bp);
                                /*
                                 * We can gracefully recover from most read
                                 * errors. Ones we can't are those that happen
                                 * after the transaction's already dirty.
                                 */
                                if (tp->t_flags & XFS_TRANS_DIRTY)
                                        xfs_force_shutdown(tp->t_mountp,
                                                        SHUTDOWN_META_IO_ERROR);
                                return error;
                        }
                }
                /*
                 * We never locked this buf ourselves, so we shouldn't
                 * brelse it either. Just get out.
                 */
                if (XFS_FORCED_SHUTDOWN(mp)) {
                        trace_xfs_trans_read_buf_shut(bp, _RET_IP_);
                        *bpp = NULL;
                        return XFS_ERROR(EIO);
                }


                bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t*);
                bip->bli_recur++;

                ASSERT(atomic_read(&bip->bli_refcount) > 0);
                trace_xfs_trans_read_buf_recur(bip);
                *bpp = bp;
                return 0;
        }

        /*
         * We always specify the XBF_DONT_BLOCK flag within a transaction
         * so that get_buf does not try to push out a delayed write buffer
         * which might cause another transaction to take place (if the
         * buffer was delayed alloc). Such recursive transactions can
         * easily deadlock with our current transaction as well as cause
         * us to run out of stack space.
         */
        bp = xfs_buf_read(target, blkno, len, flags | XBF_DONT_BLOCK);
        if (bp == NULL) {
                *bpp = NULL;
                return 0;
        }
        if (XFS_BUF_GETERROR(bp) != 0) {
                XFS_BUF_SUPER_STALE(bp);
                error = XFS_BUF_GETERROR(bp);

                xfs_ioerror_alert("xfs_trans_read_buf", mp,
                                  bp, blkno);
                if (tp->t_flags & XFS_TRANS_DIRTY)
                        xfs_force_shutdown(tp->t_mountp, SHUTDOWN_META_IO_ERROR);
                xfs_buf_relse(bp);
                return error;
        }
#ifdef DEBUG
        if (xfs_do_error && !(tp->t_flags & XFS_TRANS_DIRTY)) {
                if (xfs_error_target == target) {
                        if (((xfs_req_num++) % xfs_error_mod) == 0) {
                                xfs_force_shutdown(tp->t_mountp,
                                                   SHUTDOWN_META_IO_ERROR);
                                xfs_buf_relse(bp);
                                cmn_err(CE_DEBUG, "Returning trans error!\n");
                                return XFS_ERROR(EIO);
                        }
                }
        }
#endif
        if (XFS_FORCED_SHUTDOWN(mp))
                goto shutdown_abort;

        _xfs_trans_bjoin(tp, bp, 1);
        trace_xfs_trans_read_buf(bp->b_fspriv);

        *bpp = bp;
        return 0;

shutdown_abort:
        /*
         * the theory here is that buffer is good but we're
         * bailing out because the filesystem is being forcibly
         * shut down. So we should leave the b_flags alone since
         * the buffer's not staled and just get out.
         */
#if defined(DEBUG)
        if (XFS_BUF_ISSTALE(bp) && XFS_BUF_ISDELAYWRITE(bp))
                cmn_err(CE_NOTE, "about to pop assert, bp == 0x%p", bp);
#endif
        ASSERT((XFS_BUF_BFLAGS(bp) & (XBF_STALE|XBF_DELWRI)) !=
                                     (XBF_STALE|XBF_DELWRI));

        trace_xfs_trans_read_buf_shut(bp, _RET_IP_);
        xfs_buf_relse(bp);
        *bpp = NULL;
        return XFS_ERROR(EIO);
}
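
/*
 * Illustrative sketch only (not part of the original file): the common
 * read-modify-log pattern built on xfs_trans_read_buf().  If the buffer
 * ends up unmodified, xfs_trans_brelse() hands it back early.  The helper
 * name and the should_modify() predicate are hypothetical.
 */
#if 0   /* example only, never compiled */
STATIC int
xfs_example_read_modify(
        xfs_mount_t     *mp,
        xfs_trans_t     *tp,            /* active transaction */
        xfs_daddr_t     blkno,
        int             len)
{
        xfs_buf_t       *bp;
        int             error;

        error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, blkno, len,
                                   0, &bp);
        if (error)
                return error;
        if (bp == NULL)
                return 0;               /* no buffer was returned */

        if (!should_modify(bp)) {       /* hypothetical predicate */
                /* Unmodified: drop our reference/recursion on the buffer. */
                xfs_trans_brelse(tp, bp);
                return 0;
        }

        XFS_BUF_PTR(bp)[0] = 0;                 /* example modification */
        xfs_trans_log_buf(tp, bp, 0, 0);        /* log the touched byte */
        return 0;
}
#endif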


/*
 * Release the buffer bp which was previously acquired with one of the
 * xfs_trans_... buffer allocation routines if the buffer has not
 * been modified within this transaction. If the buffer is modified
 * within this transaction, do decrement the recursion count but do
 * not release the buffer even if the count goes to 0. If the buffer is not
 * modified within the transaction, decrement the recursion count and
 * release the buffer if the recursion count goes to 0.
 *
 * If the buffer is to be released and it was not modified before
 * this transaction began, then free the buf_log_item associated with it.
 *
 * If the transaction pointer is NULL, make this just a normal
 * brelse() call.
 */
void
xfs_trans_brelse(xfs_trans_t    *tp,
                 xfs_buf_t      *bp)
{
        xfs_buf_log_item_t      *bip;
        xfs_log_item_t          *lip;
        xfs_log_item_desc_t     *lidp;

        /*
         * Default to a normal brelse() call if the tp is NULL.
         */
        if (tp == NULL) {
                ASSERT(XFS_BUF_FSPRIVATE2(bp, void *) == NULL);
                /*
                 * If there's a buf log item attached to the buffer,
                 * then let the AIL know that the buffer is being
                 * unlocked.
                 */
                if (XFS_BUF_FSPRIVATE(bp, void *) != NULL) {
                        lip = XFS_BUF_FSPRIVATE(bp, xfs_log_item_t *);
                        if (lip->li_type == XFS_LI_BUF) {
                                bip = XFS_BUF_FSPRIVATE(bp,xfs_buf_log_item_t*);
                                xfs_trans_unlocked_item(bip->bli_item.li_ailp,
                                                        lip);
                        }
                }
                xfs_buf_relse(bp);
                return;
        }

        ASSERT(XFS_BUF_FSPRIVATE2(bp, xfs_trans_t *) == tp);
        bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *);
        ASSERT(bip->bli_item.li_type == XFS_LI_BUF);
        ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
        ASSERT(!(bip->bli_format.blf_flags & XFS_BLI_CANCEL));
        ASSERT(atomic_read(&bip->bli_refcount) > 0);

        /*
         * Find the item descriptor pointing to this buffer's
         * log item. It must be there.
         */
        lidp = xfs_trans_find_item(tp, (xfs_log_item_t*)bip);
        ASSERT(lidp != NULL);

        trace_xfs_trans_brelse(bip);

        /*
         * If the release is just for a recursive lock,
         * then decrement the count and return.
         */
        if (bip->bli_recur > 0) {
                bip->bli_recur--;
                return;
        }

        /*
         * If the buffer is dirty within this transaction, we can't
         * release it until we commit.
         */
        if (lidp->lid_flags & XFS_LID_DIRTY)
                return;

        /*
         * If the buffer has been invalidated, then we can't release
         * it until the transaction commits to disk unless it is re-dirtied
         * as part of this transaction. This prevents us from pulling
         * the item from the AIL before we should.
         */
        if (bip->bli_flags & XFS_BLI_STALE)
                return;

        ASSERT(!(bip->bli_flags & XFS_BLI_LOGGED));

        /*
         * Free up the log item descriptor tracking the released item.
         */
        xfs_trans_free_item(tp, lidp);

        /*
         * Clear the hold flag in the buf log item if it is set.
         * We wouldn't want the next user of the buffer to
         * get confused.
         */
        if (bip->bli_flags & XFS_BLI_HOLD) {
                bip->bli_flags &= ~XFS_BLI_HOLD;
        }

        /*
         * Drop our reference to the buf log item.
         */
        atomic_dec(&bip->bli_refcount);

        /*
         * If the buf item is not tracking data in the log, then
         * we must free it before releasing the buffer back to the
         * free pool. Before releasing the buffer to the free pool,
         * clear the transaction pointer in b_fsprivate2 to dissolve
         * its relation to this transaction.
         */
        if (!xfs_buf_item_dirty(bip)) {
                /***
                ASSERT(bp->b_pincount == 0);
                ***/
                ASSERT(atomic_read(&bip->bli_refcount) == 0);
                ASSERT(!(bip->bli_item.li_flags & XFS_LI_IN_AIL));
                ASSERT(!(bip->bli_flags & XFS_BLI_INODE_ALLOC_BUF));
                xfs_buf_item_relse(bp);
                bip = NULL;
        }
        XFS_BUF_SET_FSPRIVATE2(bp, NULL);

        /*
         * If we've still got a buf log item on the buffer, then
         * tell the AIL that the buffer is being unlocked.
         */
        if (bip != NULL) {
                xfs_trans_unlocked_item(bip->bli_item.li_ailp,
                                        (xfs_log_item_t*)bip);
        }

        xfs_buf_relse(bp);
        return;
}

/*
 * Mark the buffer as not needing to be unlocked when the buf item's
 * IOP_UNLOCK() routine is called. The buffer must already be locked
 * and associated with the given transaction.
 */
/* ARGSUSED */
void
xfs_trans_bhold(xfs_trans_t     *tp,
                xfs_buf_t       *bp)
{
        xfs_buf_log_item_t      *bip;

        ASSERT(XFS_BUF_ISBUSY(bp));
        ASSERT(XFS_BUF_FSPRIVATE2(bp, xfs_trans_t *) == tp);
        ASSERT(XFS_BUF_FSPRIVATE(bp, void *) != NULL);

        bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *);
        ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
        ASSERT(!(bip->bli_format.blf_flags & XFS_BLI_CANCEL));
        ASSERT(atomic_read(&bip->bli_refcount) > 0);
        bip->bli_flags |= XFS_BLI_HOLD;
        trace_xfs_trans_bhold(bip);
}

/*
 * Cancel the previous buffer hold request made on this buffer
 * for this transaction.
 */
void
xfs_trans_bhold_release(xfs_trans_t     *tp,
                        xfs_buf_t       *bp)
{
        xfs_buf_log_item_t      *bip;

        ASSERT(XFS_BUF_ISBUSY(bp));
        ASSERT(XFS_BUF_FSPRIVATE2(bp, xfs_trans_t *) == tp);
        ASSERT(XFS_BUF_FSPRIVATE(bp, void *) != NULL);

        bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *);
        ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
        ASSERT(!(bip->bli_format.blf_flags & XFS_BLI_CANCEL));
        ASSERT(atomic_read(&bip->bli_refcount) > 0);
        ASSERT(bip->bli_flags & XFS_BLI_HOLD);
        bip->bli_flags &= ~XFS_BLI_HOLD;

        trace_xfs_trans_bhold_release(bip);
}
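
/*
 * Illustrative sketch only (not part of the original file): holding a
 * buffer across a transaction commit with xfs_trans_bhold() and joining
 * it to a follow-up transaction with xfs_trans_bjoin().  Both (already
 * reserved) transactions are assumed to be supplied by the caller; the
 * helper name is hypothetical.
 */
#if 0   /* example only, never compiled */
STATIC int
xfs_example_roll_with_buf(
        xfs_trans_t     *tp,    /* transaction being committed */
        xfs_trans_t     *ntp,   /* follow-up transaction */
        xfs_buf_t       *bp)    /* buffer already joined to tp */
{
        int     error;

        /* Keep the buffer locked when tp commits and unlocks its items. */
        xfs_trans_bhold(tp, bp);

        error = xfs_trans_commit(tp, 0);
        if (error)
                return error;

        /* Still locked but no longer owned by a transaction: rejoin it. */
        xfs_trans_bjoin(ntp, bp);
        return 0;
}
#endif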

/*
 * This is called to mark bytes first through last inclusive of the given
 * buffer as needing to be logged when the transaction is committed.
 * The buffer must already be associated with the given transaction.
 *
 * First and last are numbers relative to the beginning of this buffer,
 * so the first byte in the buffer is numbered 0 regardless of the
 * value of b_blkno.
 */
void
xfs_trans_log_buf(xfs_trans_t   *tp,
                  xfs_buf_t     *bp,
                  uint          first,
                  uint          last)
{
        xfs_buf_log_item_t      *bip;
        xfs_log_item_desc_t     *lidp;

        ASSERT(XFS_BUF_ISBUSY(bp));
        ASSERT(XFS_BUF_FSPRIVATE2(bp, xfs_trans_t *) == tp);
        ASSERT(XFS_BUF_FSPRIVATE(bp, void *) != NULL);
        ASSERT((first <= last) && (last < XFS_BUF_COUNT(bp)));
        ASSERT((XFS_BUF_IODONE_FUNC(bp) == NULL) ||
               (XFS_BUF_IODONE_FUNC(bp) == xfs_buf_iodone_callbacks));

        /*
         * Mark the buffer as needing to be written out eventually,
         * and set its iodone function to remove the buffer's buf log
         * item from the AIL and free it when the buffer is flushed
         * to disk. See xfs_buf_attach_iodone() for more details
         * on li_cb and xfs_buf_iodone_callbacks().
         * If we end up aborting this transaction, we trap this buffer
         * inside the b_bdstrat callback so that this won't get written to
         * disk.
         */
        XFS_BUF_DELAYWRITE(bp);
        XFS_BUF_DONE(bp);

        bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *);
        ASSERT(atomic_read(&bip->bli_refcount) > 0);
        XFS_BUF_SET_IODONE_FUNC(bp, xfs_buf_iodone_callbacks);
        bip->bli_item.li_cb = (void(*)(xfs_buf_t*,xfs_log_item_t*))xfs_buf_iodone;

        trace_xfs_trans_log_buf(bip);

        /*
         * If we invalidated the buffer within this transaction, then
         * cancel the invalidation now that we're dirtying the buffer
         * again. There are no races with the code in xfs_buf_item_unpin(),
         * because we have a reference to the buffer this entire time.
         */
        if (bip->bli_flags & XFS_BLI_STALE) {
                bip->bli_flags &= ~XFS_BLI_STALE;
                ASSERT(XFS_BUF_ISSTALE(bp));
                XFS_BUF_UNSTALE(bp);
                bip->bli_format.blf_flags &= ~XFS_BLI_CANCEL;
        }

        lidp = xfs_trans_find_item(tp, (xfs_log_item_t*)bip);
        ASSERT(lidp != NULL);

        tp->t_flags |= XFS_TRANS_DIRTY;
        lidp->lid_flags |= XFS_LID_DIRTY;
        lidp->lid_flags &= ~XFS_LID_BUF_STALE;
        bip->bli_flags |= XFS_BLI_LOGGED;
        xfs_buf_item_log(bip, first, last);
}


/*
 * This is called to invalidate a buffer that is being used within
 * a transaction. Typically this is because the blocks in the
 * buffer are being freed, so we need to prevent it from being
 * written out when we're done. Allowing it to be written again
 * might overwrite data in the free blocks if they are reallocated
 * to a file.
 *
 * We prevent the buffer from being written out by clearing the
 * B_DELWRI flag. We can't always get rid of the buf log item at
 * this point, though, because the buffer may still be pinned by
 * another transaction. If that is the case, then we'll wait until
 * the buffer is committed to disk for the last time (we can tell
 * by the ref count) and free it in xfs_buf_item_unpin(). Until it
 * is cleaned up we will keep the buffer locked so that the buffer
 * and buf log item are not reused.
 */
void
xfs_trans_binval(
        xfs_trans_t     *tp,
        xfs_buf_t       *bp)
{
        xfs_log_item_desc_t     *lidp;
        xfs_buf_log_item_t      *bip;

        ASSERT(XFS_BUF_ISBUSY(bp));
        ASSERT(XFS_BUF_FSPRIVATE2(bp, xfs_trans_t *) == tp);
        ASSERT(XFS_BUF_FSPRIVATE(bp, void *) != NULL);

        bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *);
        lidp = xfs_trans_find_item(tp, (xfs_log_item_t*)bip);
        ASSERT(lidp != NULL);
        ASSERT(atomic_read(&bip->bli_refcount) > 0);

        trace_xfs_trans_binval(bip);

        if (bip->bli_flags & XFS_BLI_STALE) {
                /*
                 * If the buffer is already invalidated, then
                 * just return.
                 */
                ASSERT(!(XFS_BUF_ISDELAYWRITE(bp)));
                ASSERT(XFS_BUF_ISSTALE(bp));
                ASSERT(!(bip->bli_flags & (XFS_BLI_LOGGED | XFS_BLI_DIRTY)));
                ASSERT(!(bip->bli_format.blf_flags & XFS_BLI_INODE_BUF));
                ASSERT(bip->bli_format.blf_flags & XFS_BLI_CANCEL);
                ASSERT(lidp->lid_flags & XFS_LID_DIRTY);
                ASSERT(tp->t_flags & XFS_TRANS_DIRTY);
                return;
        }

        /*
         * Clear the dirty bit in the buffer and set the STALE flag
         * in the buf log item. The STALE flag will be used in
         * xfs_buf_item_unpin() to determine if it should clean up
         * when the last reference to the buf item is given up.
         * We set the XFS_BLI_CANCEL flag in the buf log format structure
         * and log the buf item. This will be used at recovery time
         * to determine that copies of the buffer in the log before
         * this should not be replayed.
         * We mark the item descriptor and the transaction dirty so
         * that we'll hold the buffer until after the commit.
         *
         * Since we're invalidating the buffer, we also clear the state
         * about which parts of the buffer have been logged. We also
         * clear the flag indicating that this is an inode buffer since
         * the data in the buffer will no longer be valid.
         *
         * We set the stale bit in the buffer as well since we're getting
         * rid of it.
         */
        XFS_BUF_UNDELAYWRITE(bp);
        XFS_BUF_STALE(bp);
        bip->bli_flags |= XFS_BLI_STALE;
        bip->bli_flags &= ~(XFS_BLI_LOGGED | XFS_BLI_DIRTY);
        bip->bli_format.blf_flags &= ~XFS_BLI_INODE_BUF;
        bip->bli_format.blf_flags |= XFS_BLI_CANCEL;
        memset((char *)(bip->bli_format.blf_data_map), 0,
              (bip->bli_format.blf_map_size * sizeof(uint)));
        lidp->lid_flags |= XFS_LID_DIRTY|XFS_LID_BUF_STALE;
        tp->t_flags |= XFS_TRANS_DIRTY;
}
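
/*
 * Illustrative sketch only (not part of the original file): invalidating
 * a metadata buffer whose blocks are being freed in this transaction, so
 * the stale contents are never written back over the reused blocks.  The
 * helper name and its arguments are hypothetical.
 */
#if 0   /* example only, never compiled */
STATIC void
xfs_example_free_block(
        xfs_trans_t     *tp,            /* active transaction */
        xfs_buftarg_t   *target,
        xfs_daddr_t     blkno,
        int             len)
{
        xfs_buf_t       *bp;

        /* Lock the buffer into the transaction without reading it. */
        bp = xfs_trans_get_buf(tp, target, blkno, len, 0);
        if (bp == NULL)
                return;

        /*
         * Mark it stale/cancelled; it stays locked until the transaction
         * commits and is cleaned up via xfs_buf_item_unpin().
         */
        xfs_trans_binval(tp, bp);
}
#endif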

/*
 * This call is used to indicate that the buffer contains on-disk
 * inodes which must be handled specially during recovery. They
 * require special handling because only the di_next_unlinked from
 * the inodes in the buffer should be recovered. The rest of the
 * data in the buffer is logged via the inodes themselves.
 *
 * All we do is set the XFS_BLI_INODE_BUF flag in the buffer's log
 * format structure so that we'll know what to do at recovery time.
 */
/* ARGSUSED */
void
xfs_trans_inode_buf(
        xfs_trans_t     *tp,
        xfs_buf_t       *bp)
{
        xfs_buf_log_item_t      *bip;

        ASSERT(XFS_BUF_ISBUSY(bp));
        ASSERT(XFS_BUF_FSPRIVATE2(bp, xfs_trans_t *) == tp);
        ASSERT(XFS_BUF_FSPRIVATE(bp, void *) != NULL);

        bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *);
        ASSERT(atomic_read(&bip->bli_refcount) > 0);

        bip->bli_format.blf_flags |= XFS_BLI_INODE_BUF;
}

/*
 * This call is used to indicate that the buffer is going to
 * be staled and was an inode buffer. This means it gets
 * special processing during unpin - where any inodes
 * associated with the buffer should be removed from ail.
 * There is also special processing during recovery,
 * any replay of the inodes in the buffer needs to be
 * prevented as the buffer may have been reused.
 */
void
xfs_trans_stale_inode_buf(
        xfs_trans_t     *tp,
        xfs_buf_t       *bp)
{
        xfs_buf_log_item_t      *bip;

        ASSERT(XFS_BUF_ISBUSY(bp));
        ASSERT(XFS_BUF_FSPRIVATE2(bp, xfs_trans_t *) == tp);
        ASSERT(XFS_BUF_FSPRIVATE(bp, void *) != NULL);

        bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *);
        ASSERT(atomic_read(&bip->bli_refcount) > 0);

        bip->bli_flags |= XFS_BLI_STALE_INODE;
        bip->bli_item.li_cb = (void(*)(xfs_buf_t*,xfs_log_item_t*))
                xfs_buf_iodone;
}



/*
 * Mark the buffer as being one which contains newly allocated
 * inodes. We need to make sure that even if this buffer is
 * relogged as an 'inode buf' we still recover all of the inode
 * images in the face of a crash. This works in coordination with
 * xfs_buf_item_committed() to ensure that the buffer remains in the
 * AIL at its original location even after it has been relogged.
 */
/* ARGSUSED */
void
xfs_trans_inode_alloc_buf(
        xfs_trans_t     *tp,
        xfs_buf_t       *bp)
{
        xfs_buf_log_item_t      *bip;

        ASSERT(XFS_BUF_ISBUSY(bp));
        ASSERT(XFS_BUF_FSPRIVATE2(bp, xfs_trans_t *) == tp);
        ASSERT(XFS_BUF_FSPRIVATE(bp, void *) != NULL);

        bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *);
        ASSERT(atomic_read(&bip->bli_refcount) > 0);

        bip->bli_flags |= XFS_BLI_INODE_ALLOC_BUF;
}


/*
 * Similar to xfs_trans_inode_buf(), this marks the buffer as a cluster of
 * dquots. However, unlike in inode buffer recovery, dquot buffers get
 * recovered in their entirety. (Hence, no XFS_BLI_DQUOT_ALLOC_BUF flag).
 * The only thing that makes dquot buffers different from regular
 * buffers is that we must not replay dquot bufs when recovering
 * if a _corresponding_ quotaoff has happened. We also have to distinguish
 * between usr dquot bufs and grp dquot bufs, because usr and grp quotas
 * can be turned off independently.
 */
/* ARGSUSED */
void
xfs_trans_dquot_buf(
        xfs_trans_t     *tp,
        xfs_buf_t       *bp,
        uint            type)
{
        xfs_buf_log_item_t      *bip;

        ASSERT(XFS_BUF_ISBUSY(bp));
        ASSERT(XFS_BUF_FSPRIVATE2(bp, xfs_trans_t *) == tp);
        ASSERT(XFS_BUF_FSPRIVATE(bp, void *) != NULL);
        ASSERT(type == XFS_BLI_UDQUOT_BUF ||
               type == XFS_BLI_PDQUOT_BUF ||
               type == XFS_BLI_GDQUOT_BUF);

        bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *);
        ASSERT(atomic_read(&bip->bli_refcount) > 0);

        bip->bli_format.blf_flags |= type;
}

/*
 * Check to see if a buffer matching the given parameters is already
 * a part of the given transaction. Only check the first, embedded
 * chunk, since we don't want to spend all day scanning large transactions.
 */
STATIC xfs_buf_t *
xfs_trans_buf_item_match(
        xfs_trans_t     *tp,
        xfs_buftarg_t   *target,
        xfs_daddr_t     blkno,
        int             len)
{
        xfs_log_item_chunk_t    *licp;
        xfs_log_item_desc_t     *lidp;
        xfs_buf_log_item_t      *blip;
        xfs_buf_t               *bp;
        int                     i;

        bp = NULL;
        len = BBTOB(len);
        licp = &tp->t_items;
        if (!xfs_lic_are_all_free(licp)) {
                for (i = 0; i < licp->lic_unused; i++) {
                        /*
                         * Skip unoccupied slots.
                         */
                        if (xfs_lic_isfree(licp, i)) {
                                continue;
                        }

                        lidp = xfs_lic_slot(licp, i);
                        blip = (xfs_buf_log_item_t *)lidp->lid_item;
                        if (blip->bli_item.li_type != XFS_LI_BUF) {
                                continue;
                        }

                        bp = blip->bli_buf;
                        if ((XFS_BUF_TARGET(bp) == target) &&
                            (XFS_BUF_ADDR(bp) == blkno) &&
                            (XFS_BUF_COUNT(bp) == len)) {
                                /*
                                 * We found it. Break out and
                                 * return the pointer to the buffer.
                                 */
                                break;
                        } else {
                                bp = NULL;
                        }
                }
        }
        return bp;
}

/*
 * Check to see if a buffer matching the given parameters is already
 * a part of the given transaction. Check all the chunks, we
 * want to be thorough.
 */
STATIC xfs_buf_t *
xfs_trans_buf_item_match_all(
        xfs_trans_t     *tp,
        xfs_buftarg_t   *target,
        xfs_daddr_t     blkno,
        int             len)
{
        xfs_log_item_chunk_t    *licp;
        xfs_log_item_desc_t     *lidp;
        xfs_buf_log_item_t      *blip;
        xfs_buf_t               *bp;
        int                     i;

        bp = NULL;
        len = BBTOB(len);
        for (licp = &tp->t_items; licp != NULL; licp = licp->lic_next) {
                if (xfs_lic_are_all_free(licp)) {
                        ASSERT(licp == &tp->t_items);
                        ASSERT(licp->lic_next == NULL);
                        return NULL;
                }
                for (i = 0; i < licp->lic_unused; i++) {
                        /*
                         * Skip unoccupied slots.
                         */
                        if (xfs_lic_isfree(licp, i)) {
                                continue;
                        }

                        lidp = xfs_lic_slot(licp, i);
                        blip = (xfs_buf_log_item_t *)lidp->lid_item;
                        if (blip->bli_item.li_type != XFS_LI_BUF) {
                                continue;
                        }

                        bp = blip->bli_buf;
                        if ((XFS_BUF_TARGET(bp) == target) &&
                            (XFS_BUF_ADDR(bp) == blkno) &&
                            (XFS_BUF_COUNT(bp) == len)) {
                                /*
                                 * We found it. Break out and
                                 * return the pointer to the buffer.
                                 */
                                return bp;
                        }
                }
        }
        return NULL;
}