1 | /* -*- mode: c; c-basic-offset: 8; -*- |
2 | * vim: noexpandtab sw=8 ts=8 sts=0: |
3 | * |
4 | * journal.c |
5 | * |
6 | * Defines functions of the journalling API |
7 | * |
8 | * Copyright (C) 2003, 2004 Oracle. All rights reserved. |
9 | * |
10 | * This program is free software; you can redistribute it and/or |
11 | * modify it under the terms of the GNU General Public |
12 | * License as published by the Free Software Foundation; either |
13 | * version 2 of the License, or (at your option) any later version. |
14 | * |
15 | * This program is distributed in the hope that it will be useful, |
16 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
17 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
18 | * General Public License for more details. |
19 | * |
20 | * You should have received a copy of the GNU General Public |
21 | * License along with this program; if not, write to the |
22 | * Free Software Foundation, Inc., 59 Temple Place - Suite 330, |
23 | * Boston, MA 02111-1307, USA. |
24 | */ |
25 | |
26 | #include <linux/fs.h> |
27 | #include <linux/types.h> |
28 | #include <linux/slab.h> |
29 | #include <linux/highmem.h> |
30 | #include <linux/kthread.h> |
31 | #include <linux/time.h> |
32 | #include <linux/random.h> |
33 | |
34 | #define MLOG_MASK_PREFIX ML_JOURNAL |
35 | #include <cluster/masklog.h> |
36 | |
37 | #include "ocfs2.h" |
38 | |
39 | #include "alloc.h" |
40 | #include "blockcheck.h" |
41 | #include "dir.h" |
42 | #include "dlmglue.h" |
43 | #include "extent_map.h" |
44 | #include "heartbeat.h" |
45 | #include "inode.h" |
46 | #include "journal.h" |
47 | #include "localalloc.h" |
48 | #include "slot_map.h" |
49 | #include "super.h" |
50 | #include "sysfile.h" |
51 | #include "quota.h" |
52 | |
53 | #include "buffer_head_io.h" |
54 | |
55 | DEFINE_SPINLOCK(trans_inc_lock); |
56 | |
57 | #define ORPHAN_SCAN_SCHEDULE_TIMEOUT 300000 |
58 | |
59 | static int ocfs2_force_read_journal(struct inode *inode); |
60 | static int ocfs2_recover_node(struct ocfs2_super *osb, |
61 | int node_num, int slot_num); |
62 | static int __ocfs2_recovery_thread(void *arg); |
63 | static int ocfs2_commit_cache(struct ocfs2_super *osb); |
64 | static int __ocfs2_wait_on_mount(struct ocfs2_super *osb, int quota); |
65 | static int ocfs2_journal_toggle_dirty(struct ocfs2_super *osb, |
66 | int dirty, int replayed); |
67 | static int ocfs2_trylock_journal(struct ocfs2_super *osb, |
68 | int slot_num); |
69 | static int ocfs2_recover_orphans(struct ocfs2_super *osb, |
70 | int slot); |
71 | static int ocfs2_commit_thread(void *arg); |
72 | static void ocfs2_queue_recovery_completion(struct ocfs2_journal *journal, |
73 | int slot_num, |
74 | struct ocfs2_dinode *la_dinode, |
75 | struct ocfs2_dinode *tl_dinode, |
76 | struct ocfs2_quota_recovery *qrec); |
77 | |
78 | static inline int ocfs2_wait_on_mount(struct ocfs2_super *osb) |
79 | { |
80 | return __ocfs2_wait_on_mount(osb, 0); |
81 | } |
82 | |
83 | static inline int ocfs2_wait_on_quotas(struct ocfs2_super *osb) |
84 | { |
85 | return __ocfs2_wait_on_mount(osb, 1); |
86 | } |
87 | |
88 | /* |
89 | * This replay_map is to track online/offline slots, so we can recover |
90 | * offline slots during recovery and mount. |
91 | */ |
92 | |
93 | enum ocfs2_replay_state { |
94 | REPLAY_UNNEEDED = 0, /* Replay is not needed, so ignore this map */ |
95 | REPLAY_NEEDED, /* Replay slots marked in rm_replay_slots */ |
96 | REPLAY_DONE /* Replay was already queued */ |
97 | }; |
98 | |
99 | struct ocfs2_replay_map { |
100 | unsigned int rm_slots; |
101 | enum ocfs2_replay_state rm_state; |
102 | unsigned char rm_replay_slots[]; |
103 | }; |
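
/*
 * Rough life cycle of the replay map, as the mount path and the recovery
 * thread further below use it (a sketch, not an additional API):
 *
 *        ocfs2_compute_replay_slots(osb);
 *        ...
 *        ocfs2_replay_map_set_state(osb, REPLAY_NEEDED);
 *        ocfs2_queue_replay_slots(osb);
 *        ocfs2_free_replay_slots(osb);
 */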
104 | |
105 | void ocfs2_replay_map_set_state(struct ocfs2_super *osb, int state) |
106 | { |
107 | if (!osb->replay_map) |
108 | return; |
109 | |
110 | /* If we've already queued the replay, we don't have any more to do */ |
111 | if (osb->replay_map->rm_state == REPLAY_DONE) |
112 | return; |
113 | |
114 | osb->replay_map->rm_state = state; |
115 | } |
116 | |
117 | int ocfs2_compute_replay_slots(struct ocfs2_super *osb) |
118 | { |
119 | struct ocfs2_replay_map *replay_map; |
120 | int i, node_num; |
121 | |
122 | /* If replay map is already set, we don't do it again */ |
123 | if (osb->replay_map) |
124 | return 0; |
125 | |
126 | replay_map = kzalloc(sizeof(struct ocfs2_replay_map) + |
127 | (osb->max_slots * sizeof(char)), GFP_KERNEL); |
128 | |
129 | if (!replay_map) { |
130 | mlog_errno(-ENOMEM); |
131 | return -ENOMEM; |
132 | } |
133 | |
134 | spin_lock(&osb->osb_lock); |
135 | |
136 | replay_map->rm_slots = osb->max_slots; |
137 | replay_map->rm_state = REPLAY_UNNEEDED; |
138 | |
139 | /* set rm_replay_slots for offline slot(s) */ |
140 | for (i = 0; i < replay_map->rm_slots; i++) { |
141 | if (ocfs2_slot_to_node_num_locked(osb, i, &node_num) == -ENOENT) |
142 | replay_map->rm_replay_slots[i] = 1; |
143 | } |
144 | |
145 | osb->replay_map = replay_map; |
146 | spin_unlock(&osb->osb_lock); |
147 | return 0; |
148 | } |
149 | |
150 | void ocfs2_queue_replay_slots(struct ocfs2_super *osb) |
151 | { |
152 | struct ocfs2_replay_map *replay_map = osb->replay_map; |
153 | int i; |
154 | |
155 | if (!replay_map) |
156 | return; |
157 | |
158 | if (replay_map->rm_state != REPLAY_NEEDED) |
159 | return; |
160 | |
161 | for (i = 0; i < replay_map->rm_slots; i++) |
162 | if (replay_map->rm_replay_slots[i]) |
163 | ocfs2_queue_recovery_completion(osb->journal, i, NULL, |
164 | NULL, NULL); |
165 | replay_map->rm_state = REPLAY_DONE; |
166 | } |
167 | |
168 | void ocfs2_free_replay_slots(struct ocfs2_super *osb) |
169 | { |
170 | struct ocfs2_replay_map *replay_map = osb->replay_map; |
171 | |
172 | if (!osb->replay_map) |
173 | return; |
174 | |
175 | kfree(replay_map); |
176 | osb->replay_map = NULL; |
177 | } |
178 | |
179 | int ocfs2_recovery_init(struct ocfs2_super *osb) |
180 | { |
181 | struct ocfs2_recovery_map *rm; |
182 | |
183 | mutex_init(&osb->recovery_lock); |
184 | osb->disable_recovery = 0; |
185 | osb->recovery_thread_task = NULL; |
186 | init_waitqueue_head(&osb->recovery_event); |
187 | |
188 | rm = kzalloc(sizeof(struct ocfs2_recovery_map) + |
189 | osb->max_slots * sizeof(unsigned int), |
190 | GFP_KERNEL); |
191 | if (!rm) { |
192 | mlog_errno(-ENOMEM); |
193 | return -ENOMEM; |
194 | } |
195 | |
196 | rm->rm_entries = (unsigned int *)((char *)rm + |
197 | sizeof(struct ocfs2_recovery_map)); |
198 | osb->recovery_map = rm; |
199 | |
200 | return 0; |
201 | } |
202 | |
203 | /* we can't grab the goofy sem lock from inside wait_event, so we use |
204 | * memory barriers to make sure that we'll see the null task before |
205 | * being woken up */ |
206 | static int ocfs2_recovery_thread_running(struct ocfs2_super *osb) |
207 | { |
208 | mb(); |
209 | return osb->recovery_thread_task != NULL; |
210 | } |
211 | |
212 | void ocfs2_recovery_exit(struct ocfs2_super *osb) |
213 | { |
214 | struct ocfs2_recovery_map *rm; |
215 | |
216 | /* disable any new recovery threads and wait for any currently |
217 | * running ones to exit. Do this before setting the vol_state. */ |
218 | mutex_lock(&osb->recovery_lock); |
219 | osb->disable_recovery = 1; |
220 | mutex_unlock(&osb->recovery_lock); |
221 | wait_event(osb->recovery_event, !ocfs2_recovery_thread_running(osb)); |
222 | |
223 | /* At this point, we know that no more recovery threads can be |
224 | * launched, so wait for any recovery completion work to |
225 | * complete. */ |
226 | flush_workqueue(ocfs2_wq); |
227 | |
228 | /* |
229 | * Now that recovery is shut down, and the osb is about to be |
230 | * freed, the osb_lock is not taken here. |
231 | */ |
232 | rm = osb->recovery_map; |
233 | /* XXX: Should we bug if there are dirty entries? */ |
234 | |
235 | kfree(rm); |
236 | } |
237 | |
238 | static int __ocfs2_recovery_map_test(struct ocfs2_super *osb, |
239 | unsigned int node_num) |
240 | { |
241 | int i; |
242 | struct ocfs2_recovery_map *rm = osb->recovery_map; |
243 | |
244 | assert_spin_locked(&osb->osb_lock); |
245 | |
246 | for (i = 0; i < rm->rm_used; i++) { |
247 | if (rm->rm_entries[i] == node_num) |
248 | return 1; |
249 | } |
250 | |
251 | return 0; |
252 | } |
253 | |
254 | /* Behaves like test-and-set. Returns the previous value */ |
255 | static int ocfs2_recovery_map_set(struct ocfs2_super *osb, |
256 | unsigned int node_num) |
257 | { |
258 | struct ocfs2_recovery_map *rm = osb->recovery_map; |
259 | |
260 | spin_lock(&osb->osb_lock); |
261 | if (__ocfs2_recovery_map_test(osb, node_num)) { |
262 | spin_unlock(&osb->osb_lock); |
263 | return 1; |
264 | } |
265 | |
266 | /* XXX: Can this be exploited? Not from o2dlm... */ |
267 | BUG_ON(rm->rm_used >= osb->max_slots); |
268 | |
269 | rm->rm_entries[rm->rm_used] = node_num; |
270 | rm->rm_used++; |
271 | spin_unlock(&osb->osb_lock); |
272 | |
273 | return 0; |
274 | } |
275 | |
276 | static void ocfs2_recovery_map_clear(struct ocfs2_super *osb, |
277 | unsigned int node_num) |
278 | { |
279 | int i; |
280 | struct ocfs2_recovery_map *rm = osb->recovery_map; |
281 | |
282 | spin_lock(&osb->osb_lock); |
283 | |
284 | for (i = 0; i < rm->rm_used; i++) { |
285 | if (rm->rm_entries[i] == node_num) |
286 | break; |
287 | } |
288 | |
289 | if (i < rm->rm_used) { |
290 | /* XXX: be careful with the pointer math */ |
291 | memmove(&(rm->rm_entries[i]), &(rm->rm_entries[i + 1]), |
292 | (rm->rm_used - i - 1) * sizeof(unsigned int)); |
293 | rm->rm_used--; |
294 | } |
295 | |
296 | spin_unlock(&osb->osb_lock); |
297 | } |
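
/*
 * Rough usage of the three map helpers above: ocfs2_recovery_thread()
 * adds a dead node with ocfs2_recovery_map_set(), __ocfs2_recovery_thread()
 * removes it with ocfs2_recovery_map_clear() once ocfs2_recover_node() has
 * succeeded, and ocfs2_mark_dead_nodes() checks membership under osb_lock
 * with __ocfs2_recovery_map_test().
 */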
298 | |
299 | static int ocfs2_commit_cache(struct ocfs2_super *osb) |
300 | { |
301 | int status = 0; |
302 | unsigned int flushed; |
303 | unsigned long old_id; |
304 | struct ocfs2_journal *journal = NULL; |
305 | |
306 | mlog_entry_void(); |
307 | |
308 | journal = osb->journal; |
309 | |
310 | /* Flush all pending commits and checkpoint the journal. */ |
311 | down_write(&journal->j_trans_barrier); |
312 | |
313 | if (atomic_read(&journal->j_num_trans) == 0) { |
314 | up_write(&journal->j_trans_barrier); |
315 | mlog(0, "No transactions for me to flush!\n"); |
316 | goto finally; |
317 | } |
318 | |
319 | jbd2_journal_lock_updates(journal->j_journal); |
320 | status = jbd2_journal_flush(journal->j_journal); |
321 | jbd2_journal_unlock_updates(journal->j_journal); |
322 | if (status < 0) { |
323 | up_write(&journal->j_trans_barrier); |
324 | mlog_errno(status); |
325 | goto finally; |
326 | } |
327 | |
328 | old_id = ocfs2_inc_trans_id(journal); |
329 | |
330 | flushed = atomic_read(&journal->j_num_trans); |
331 | atomic_set(&journal->j_num_trans, 0); |
332 | up_write(&journal->j_trans_barrier); |
333 | |
334 | mlog(0, "commit_thread: flushed transaction %lu (%u handles)\n", |
335 | journal->j_trans_id, flushed); |
336 | |
337 | ocfs2_wake_downconvert_thread(osb); |
338 | wake_up(&journal->j_checkpointed); |
339 | finally: |
340 | mlog_exit(status); |
341 | return status; |
342 | } |
343 | |
344 | /* Starts a transaction and returns its handle. If a transaction is |
345 | * already running (a nested call), the running handle is returned with |
346 | * its reference count bumped. On error an ERR_PTR() value is returned. */ |
347 | handle_t *ocfs2_start_trans(struct ocfs2_super *osb, int max_buffs) |
348 | { |
349 | journal_t *journal = osb->journal->j_journal; |
350 | handle_t *handle; |
351 | |
352 | BUG_ON(!osb || !osb->journal->j_journal); |
353 | |
354 | if (ocfs2_is_hard_readonly(osb)) |
355 | return ERR_PTR(-EROFS); |
356 | |
357 | BUG_ON(osb->journal->j_state == OCFS2_JOURNAL_FREE); |
358 | BUG_ON(max_buffs <= 0); |
359 | |
360 | /* Nested transaction? Just return the handle... */ |
361 | if (journal_current_handle()) |
362 | return jbd2_journal_start(journal, max_buffs); |
363 | |
364 | down_read(&osb->journal->j_trans_barrier); |
365 | |
366 | handle = jbd2_journal_start(journal, max_buffs); |
367 | if (IS_ERR(handle)) { |
368 | up_read(&osb->journal->j_trans_barrier); |
369 | |
370 | mlog_errno(PTR_ERR(handle)); |
371 | |
372 | if (is_journal_aborted(journal)) { |
373 | ocfs2_abort(osb->sb, "Detected aborted journal"); |
374 | handle = ERR_PTR(-EROFS); |
375 | } |
376 | } else { |
377 | if (!ocfs2_mount_local(osb)) |
378 | atomic_inc(&(osb->journal->j_num_trans)); |
379 | } |
380 | |
381 | return handle; |
382 | } |
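
/*
 * A typical caller pairs ocfs2_start_trans() with the access/dirty/commit
 * helpers below, roughly like this (a sketch only; di_bh, inode and
 * OCFS2_INODE_UPDATE_CREDITS stand in for whatever buffer, inode and
 * credit count the caller actually needs):
 *
 *        handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
 *        if (IS_ERR(handle))
 *                return PTR_ERR(handle);
 *        ret = ocfs2_journal_access_di(handle, inode, di_bh,
 *                                      OCFS2_JOURNAL_ACCESS_WRITE);
 *        if (ret)
 *                goto out_commit;
 *        ... modify di_bh->b_data ...
 *        ocfs2_journal_dirty(handle, di_bh);
 * out_commit:
 *        ocfs2_commit_trans(osb, handle);
 */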
383 | |
384 | int ocfs2_commit_trans(struct ocfs2_super *osb, |
385 | handle_t *handle) |
386 | { |
387 | int ret, nested; |
388 | struct ocfs2_journal *journal = osb->journal; |
389 | |
390 | BUG_ON(!handle); |
391 | |
392 | nested = handle->h_ref > 1; |
393 | ret = jbd2_journal_stop(handle); |
394 | if (ret < 0) |
395 | mlog_errno(ret); |
396 | |
397 | if (!nested) |
398 | up_read(&journal->j_trans_barrier); |
399 | |
400 | return ret; |
401 | } |
402 | |
403 | /* |
404 | * 'nblocks' is what you want to add to the current |
405 | * transaction. extend_trans will either extend the current handle by |
406 | * nblocks, or commit it and start a new one with nblocks credits. |
407 | * |
408 | * This might call jbd2_journal_restart() which will commit dirty buffers |
409 | * and then restart the transaction. Before calling |
410 | * ocfs2_extend_trans(), any changed blocks should have been |
411 | * dirtied. After calling it, all blocks which need to be changed must |
412 | * go through another set of journal_access/journal_dirty calls. |
413 | * |
414 | * WARNING: This will not release any semaphores or disk locks taken |
415 | * during the transaction, so make sure they were taken *before* |
416 | * start_trans or we'll have ordering deadlocks. |
417 | * |
418 | * WARNING2: Note that we do *not* drop j_trans_barrier here. This is |
419 | * good because transaction ids haven't yet been recorded on the |
420 | * cluster locks associated with this handle. |
421 | */ |
422 | int ocfs2_extend_trans(handle_t *handle, int nblocks) |
423 | { |
424 | int status; |
425 | |
426 | BUG_ON(!handle); |
427 | BUG_ON(!nblocks); |
428 | |
429 | mlog_entry_void(); |
430 | |
431 | mlog(0, "Trying to extend transaction by %d blocks\n", nblocks); |
432 | |
433 | #ifdef CONFIG_OCFS2_DEBUG_FS |
434 | status = 1; |
435 | #else |
436 | status = jbd2_journal_extend(handle, nblocks); |
437 | if (status < 0) { |
438 | mlog_errno(status); |
439 | goto bail; |
440 | } |
441 | #endif |
442 | |
443 | if (status > 0) { |
444 | mlog(0, |
445 | "jbd2_journal_extend failed, trying " |
446 | "jbd2_journal_restart\n"); |
447 | status = jbd2_journal_restart(handle, nblocks); |
448 | if (status < 0) { |
449 | mlog_errno(status); |
450 | goto bail; |
451 | } |
452 | } |
453 | |
454 | status = 0; |
455 | bail: |
456 | |
457 | mlog_exit(status); |
458 | return status; |
459 | } |
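
/*
 * Illustration of the warning above (a sketch; extra_credits, inode and
 * di_bh are placeholders): once ocfs2_extend_trans() returns, any buffer
 * that will still be modified must be declared to the journal again before
 * it is dirtied, because a restart may have committed it:
 *
 *        ret = ocfs2_extend_trans(handle, extra_credits);
 *        if (ret)
 *                goto bail;
 *        ret = ocfs2_journal_access_di(handle, inode, di_bh,
 *                                      OCFS2_JOURNAL_ACCESS_WRITE);
 */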
460 | |
461 | struct ocfs2_triggers { |
462 | struct jbd2_buffer_trigger_type ot_triggers; |
463 | int ot_offset; |
464 | }; |
465 | |
466 | static inline struct ocfs2_triggers *to_ocfs2_trigger(struct jbd2_buffer_trigger_type *triggers) |
467 | { |
468 | return container_of(triggers, struct ocfs2_triggers, ot_triggers); |
469 | } |
470 | |
471 | static void ocfs2_commit_trigger(struct jbd2_buffer_trigger_type *triggers, |
472 | struct buffer_head *bh, |
473 | void *data, size_t size) |
474 | { |
475 | struct ocfs2_triggers *ot = to_ocfs2_trigger(triggers); |
476 | |
477 | /* |
478 | * We aren't guaranteed to have the superblock here, so we |
479 | * must unconditionally compute the ecc data. |
480 | * __ocfs2_journal_access() will only set the triggers if |
481 | * metaecc is enabled. |
482 | */ |
483 | ocfs2_block_check_compute(data, size, data + ot->ot_offset); |
484 | } |
485 | |
486 | /* |
487 | * Quota blocks have their own trigger because the struct ocfs2_block_check |
488 | * offset depends on the blocksize. |
489 | */ |
490 | static void ocfs2_dq_commit_trigger(struct jbd2_buffer_trigger_type *triggers, |
491 | struct buffer_head *bh, |
492 | void *data, size_t size) |
493 | { |
494 | struct ocfs2_disk_dqtrailer *dqt = |
495 | ocfs2_block_dqtrailer(size, data); |
496 | |
497 | /* |
498 | * We aren't guaranteed to have the superblock here, so we |
499 | * must unconditionally compute the ecc data. |
500 | * __ocfs2_journal_access() will only set the triggers if |
501 | * metaecc is enabled. |
502 | */ |
503 | ocfs2_block_check_compute(data, size, &dqt->dq_check); |
504 | } |
505 | |
506 | /* |
507 | * Directory blocks also have their own trigger because the |
508 | * struct ocfs2_block_check offset depends on the blocksize. |
509 | */ |
510 | static void ocfs2_db_commit_trigger(struct jbd2_buffer_trigger_type *triggers, |
511 | struct buffer_head *bh, |
512 | void *data, size_t size) |
513 | { |
514 | struct ocfs2_dir_block_trailer *trailer = |
515 | ocfs2_dir_trailer_from_size(size, data); |
516 | |
517 | /* |
518 | * We aren't guaranteed to have the superblock here, so we |
519 | * must unconditionally compute the ecc data. |
520 | * __ocfs2_journal_access() will only set the triggers if |
521 | * metaecc is enabled. |
522 | */ |
523 | ocfs2_block_check_compute(data, size, &trailer->db_check); |
524 | } |
525 | |
526 | static void ocfs2_abort_trigger(struct jbd2_buffer_trigger_type *triggers, |
527 | struct buffer_head *bh) |
528 | { |
529 | mlog(ML_ERROR, |
530 | "ocfs2_abort_trigger called by JBD2. bh = 0x%lx, " |
531 | "bh->b_blocknr = %llu\n", |
532 | (unsigned long)bh, |
533 | (unsigned long long)bh->b_blocknr); |
534 | |
535 | /* We aren't guaranteed to have the superblock here - but if we |
536 | * don't, it'll just crash. */ |
537 | ocfs2_error(bh->b_assoc_map->host->i_sb, |
538 | "JBD2 has aborted our journal, ocfs2 cannot continue\n"); |
539 | } |
540 | |
541 | static struct ocfs2_triggers di_triggers = { |
542 | .ot_triggers = { |
543 | .t_commit = ocfs2_commit_trigger, |
544 | .t_abort = ocfs2_abort_trigger, |
545 | }, |
546 | .ot_offset = offsetof(struct ocfs2_dinode, i_check), |
547 | }; |
548 | |
549 | static struct ocfs2_triggers eb_triggers = { |
550 | .ot_triggers = { |
551 | .t_commit = ocfs2_commit_trigger, |
552 | .t_abort = ocfs2_abort_trigger, |
553 | }, |
554 | .ot_offset = offsetof(struct ocfs2_extent_block, h_check), |
555 | }; |
556 | |
557 | static struct ocfs2_triggers gd_triggers = { |
558 | .ot_triggers = { |
559 | .t_commit = ocfs2_commit_trigger, |
560 | .t_abort = ocfs2_abort_trigger, |
561 | }, |
562 | .ot_offset = offsetof(struct ocfs2_group_desc, bg_check), |
563 | }; |
564 | |
565 | static struct ocfs2_triggers db_triggers = { |
566 | .ot_triggers = { |
567 | .t_commit = ocfs2_db_commit_trigger, |
568 | .t_abort = ocfs2_abort_trigger, |
569 | }, |
570 | }; |
571 | |
572 | static struct ocfs2_triggers xb_triggers = { |
573 | .ot_triggers = { |
574 | .t_commit = ocfs2_commit_trigger, |
575 | .t_abort = ocfs2_abort_trigger, |
576 | }, |
577 | .ot_offset = offsetof(struct ocfs2_xattr_block, xb_check), |
578 | }; |
579 | |
580 | static struct ocfs2_triggers dq_triggers = { |
581 | .ot_triggers = { |
582 | .t_commit = ocfs2_dq_commit_trigger, |
583 | .t_abort = ocfs2_abort_trigger, |
584 | }, |
585 | }; |
586 | |
587 | static struct ocfs2_triggers dr_triggers = { |
588 | .ot_triggers = { |
589 | .t_commit = ocfs2_commit_trigger, |
590 | .t_abort = ocfs2_abort_trigger, |
591 | }, |
592 | .ot_offset = offsetof(struct ocfs2_dx_root_block, dr_check), |
593 | }; |
594 | |
595 | static struct ocfs2_triggers dl_triggers = { |
596 | .ot_triggers = { |
597 | .t_commit = ocfs2_commit_trigger, |
598 | .t_abort = ocfs2_abort_trigger, |
599 | }, |
600 | .ot_offset = offsetof(struct ocfs2_dx_leaf, dl_check), |
601 | }; |
602 | |
603 | static int __ocfs2_journal_access(handle_t *handle, |
604 | struct inode *inode, |
605 | struct buffer_head *bh, |
606 | struct ocfs2_triggers *triggers, |
607 | int type) |
608 | { |
609 | int status; |
610 | |
611 | BUG_ON(!inode); |
612 | BUG_ON(!handle); |
613 | BUG_ON(!bh); |
614 | |
615 | mlog_entry("bh->b_blocknr=%llu, type=%d (\"%s\"), bh->b_size = %zu\n", |
616 | (unsigned long long)bh->b_blocknr, type, |
617 | (type == OCFS2_JOURNAL_ACCESS_CREATE) ? |
618 | "OCFS2_JOURNAL_ACCESS_CREATE" : |
619 | "OCFS2_JOURNAL_ACCESS_WRITE", |
620 | bh->b_size); |
621 | |
622 | /* we can safely remove this assertion after testing. */ |
623 | if (!buffer_uptodate(bh)) { |
624 | mlog(ML_ERROR, "giving me a buffer that's not uptodate!\n"); |
625 | mlog(ML_ERROR, "b_blocknr=%llu\n", |
626 | (unsigned long long)bh->b_blocknr); |
627 | BUG(); |
628 | } |
629 | |
630 | /* Set the current transaction information on the inode so |
631 | * that the locking code knows whether it can drop its locks |
632 | * on this inode or not. We're protected from the commit |
633 | * thread updating the current transaction id until |
634 | * ocfs2_commit_trans() because ocfs2_start_trans() took |
635 | * j_trans_barrier for us. */ |
636 | ocfs2_set_inode_lock_trans(OCFS2_SB(inode->i_sb)->journal, inode); |
637 | |
638 | mutex_lock(&OCFS2_I(inode)->ip_io_mutex); |
639 | switch (type) { |
640 | case OCFS2_JOURNAL_ACCESS_CREATE: |
641 | case OCFS2_JOURNAL_ACCESS_WRITE: |
642 | status = jbd2_journal_get_write_access(handle, bh); |
643 | break; |
644 | |
645 | case OCFS2_JOURNAL_ACCESS_UNDO: |
646 | status = jbd2_journal_get_undo_access(handle, bh); |
647 | break; |
648 | |
649 | default: |
650 | status = -EINVAL; |
651 | mlog(ML_ERROR, "Unknown access type!\n"); |
652 | } |
653 | if (!status && ocfs2_meta_ecc(OCFS2_SB(inode->i_sb)) && triggers) |
654 | jbd2_journal_set_triggers(bh, &triggers->ot_triggers); |
655 | mutex_unlock(&OCFS2_I(inode)->ip_io_mutex); |
656 | |
657 | if (status < 0) |
658 | mlog(ML_ERROR, "Error %d getting %d access to buffer!\n", |
659 | status, type); |
660 | |
661 | mlog_exit(status); |
662 | return status; |
663 | } |
664 | |
665 | int ocfs2_journal_access_di(handle_t *handle, struct inode *inode, |
666 | struct buffer_head *bh, int type) |
667 | { |
668 | return __ocfs2_journal_access(handle, inode, bh, &di_triggers, |
669 | type); |
670 | } |
671 | |
672 | int ocfs2_journal_access_eb(handle_t *handle, struct inode *inode, |
673 | struct buffer_head *bh, int type) |
674 | { |
675 | return __ocfs2_journal_access(handle, inode, bh, &eb_triggers, |
676 | type); |
677 | } |
678 | |
679 | int ocfs2_journal_access_gd(handle_t *handle, struct inode *inode, |
680 | struct buffer_head *bh, int type) |
681 | { |
682 | return __ocfs2_journal_access(handle, inode, bh, &gd_triggers, |
683 | type); |
684 | } |
685 | |
686 | int ocfs2_journal_access_db(handle_t *handle, struct inode *inode, |
687 | struct buffer_head *bh, int type) |
688 | { |
689 | return __ocfs2_journal_access(handle, inode, bh, &db_triggers, |
690 | type); |
691 | } |
692 | |
693 | int ocfs2_journal_access_xb(handle_t *handle, struct inode *inode, |
694 | struct buffer_head *bh, int type) |
695 | { |
696 | return __ocfs2_journal_access(handle, inode, bh, &xb_triggers, |
697 | type); |
698 | } |
699 | |
700 | int ocfs2_journal_access_dq(handle_t *handle, struct inode *inode, |
701 | struct buffer_head *bh, int type) |
702 | { |
703 | return __ocfs2_journal_access(handle, inode, bh, &dq_triggers, |
704 | type); |
705 | } |
706 | |
707 | int ocfs2_journal_access_dr(handle_t *handle, struct inode *inode, |
708 | struct buffer_head *bh, int type) |
709 | { |
710 | return __ocfs2_journal_access(handle, inode, bh, &dr_triggers, |
711 | type); |
712 | } |
713 | |
714 | int ocfs2_journal_access_dl(handle_t *handle, struct inode *inode, |
715 | struct buffer_head *bh, int type) |
716 | { |
717 | return __ocfs2_journal_access(handle, inode, bh, &dl_triggers, |
718 | type); |
719 | } |
720 | |
721 | int ocfs2_journal_access(handle_t *handle, struct inode *inode, |
722 | struct buffer_head *bh, int type) |
723 | { |
724 | return __ocfs2_journal_access(handle, inode, bh, NULL, type); |
725 | } |
726 | |
727 | int ocfs2_journal_dirty(handle_t *handle, |
728 | struct buffer_head *bh) |
729 | { |
730 | int status; |
731 | |
732 | mlog_entry("(bh->b_blocknr=%llu)\n", |
733 | (unsigned long long)bh->b_blocknr); |
734 | |
735 | status = jbd2_journal_dirty_metadata(handle, bh); |
736 | if (status < 0) |
737 | mlog(ML_ERROR, "Could not dirty metadata buffer. " |
738 | "(bh->b_blocknr=%llu)\n", |
739 | (unsigned long long)bh->b_blocknr); |
740 | |
741 | mlog_exit(status); |
742 | return status; |
743 | } |
744 | |
745 | #define OCFS2_DEFAULT_COMMIT_INTERVAL (HZ * JBD2_DEFAULT_MAX_COMMIT_AGE) |
746 | |
747 | void ocfs2_set_journal_params(struct ocfs2_super *osb) |
748 | { |
749 | journal_t *journal = osb->journal->j_journal; |
750 | unsigned long commit_interval = OCFS2_DEFAULT_COMMIT_INTERVAL; |
751 | |
752 | if (osb->osb_commit_interval) |
753 | commit_interval = osb->osb_commit_interval; |
754 | |
755 | spin_lock(&journal->j_state_lock); |
756 | journal->j_commit_interval = commit_interval; |
757 | if (osb->s_mount_opt & OCFS2_MOUNT_BARRIER) |
758 | journal->j_flags |= JBD2_BARRIER; |
759 | else |
760 | journal->j_flags &= ~JBD2_BARRIER; |
761 | spin_unlock(&journal->j_state_lock); |
762 | } |
763 | |
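/*
 * The functions that follow are the journal's setup/teardown entry points.
 * Very roughly, a mount calls ocfs2_journal_init() to open and cluster-lock
 * the journal inode, then ocfs2_journal_wipe() and/or ocfs2_journal_load()
 * to bring it into a running state (load also starts the commit thread),
 * and ocfs2_journal_shutdown() undoes all of this at unmount. The exact
 * ordering is owned by the caller, not by this file.
 */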
764 | int ocfs2_journal_init(struct ocfs2_journal *journal, int *dirty) |
765 | { |
766 | int status = -1; |
767 | struct inode *inode = NULL; /* the journal inode */ |
768 | journal_t *j_journal = NULL; |
769 | struct ocfs2_dinode *di = NULL; |
770 | struct buffer_head *bh = NULL; |
771 | struct ocfs2_super *osb; |
772 | int inode_lock = 0; |
773 | |
774 | mlog_entry_void(); |
775 | |
776 | BUG_ON(!journal); |
777 | |
778 | osb = journal->j_osb; |
779 | |
780 | /* already have the inode for our journal */ |
781 | inode = ocfs2_get_system_file_inode(osb, JOURNAL_SYSTEM_INODE, |
782 | osb->slot_num); |
783 | if (inode == NULL) { |
784 | status = -EACCES; |
785 | mlog_errno(status); |
786 | goto done; |
787 | } |
788 | if (is_bad_inode(inode)) { |
789 | mlog(ML_ERROR, "access error (bad inode)\n"); |
790 | iput(inode); |
791 | inode = NULL; |
792 | status = -EACCES; |
793 | goto done; |
794 | } |
795 | |
796 | SET_INODE_JOURNAL(inode); |
797 | OCFS2_I(inode)->ip_open_count++; |
798 | |
799 | /* Skip recovery waits here - journal inode metadata never |
800 | * changes in a live cluster so it can be considered an |
801 | * exception to the rule. */ |
802 | status = ocfs2_inode_lock_full(inode, &bh, 1, OCFS2_META_LOCK_RECOVERY); |
803 | if (status < 0) { |
804 | if (status != -ERESTARTSYS) |
805 | mlog(ML_ERROR, "Could not get lock on journal!\n"); |
806 | goto done; |
807 | } |
808 | |
809 | inode_lock = 1; |
810 | di = (struct ocfs2_dinode *)bh->b_data; |
811 | |
812 | if (inode->i_size < OCFS2_MIN_JOURNAL_SIZE) { |
813 | mlog(ML_ERROR, "Journal file size (%lld) is too small!\n", |
814 | inode->i_size); |
815 | status = -EINVAL; |
816 | goto done; |
817 | } |
818 | |
819 | mlog(0, "inode->i_size = %lld\n", inode->i_size); |
820 | mlog(0, "inode->i_blocks = %llu\n", |
821 | (unsigned long long)inode->i_blocks); |
822 | mlog(0, "inode->ip_clusters = %u\n", OCFS2_I(inode)->ip_clusters); |
823 | |
824 | /* call the kernel's journal init function now */ |
825 | j_journal = jbd2_journal_init_inode(inode); |
826 | if (j_journal == NULL) { |
827 | mlog(ML_ERROR, "Linux journal layer error\n"); |
828 | status = -EINVAL; |
829 | goto done; |
830 | } |
831 | |
832 | mlog(0, "Returned from jbd2_journal_init_inode\n"); |
833 | mlog(0, "j_journal->j_maxlen = %u\n", j_journal->j_maxlen); |
834 | |
835 | *dirty = (le32_to_cpu(di->id1.journal1.ij_flags) & |
836 | OCFS2_JOURNAL_DIRTY_FL); |
837 | |
838 | journal->j_journal = j_journal; |
839 | journal->j_inode = inode; |
840 | journal->j_bh = bh; |
841 | |
842 | ocfs2_set_journal_params(osb); |
843 | |
844 | journal->j_state = OCFS2_JOURNAL_LOADED; |
845 | |
846 | status = 0; |
847 | done: |
848 | if (status < 0) { |
849 | if (inode_lock) |
850 | ocfs2_inode_unlock(inode, 1); |
851 | brelse(bh); |
852 | if (inode) { |
853 | OCFS2_I(inode)->ip_open_count--; |
854 | iput(inode); |
855 | } |
856 | } |
857 | |
858 | mlog_exit(status); |
859 | return status; |
860 | } |
861 | |
862 | static void ocfs2_bump_recovery_generation(struct ocfs2_dinode *di) |
863 | { |
864 | le32_add_cpu(&(di->id1.journal1.ij_recovery_generation), 1); |
865 | } |
866 | |
867 | static u32 ocfs2_get_recovery_generation(struct ocfs2_dinode *di) |
868 | { |
869 | return le32_to_cpu(di->id1.journal1.ij_recovery_generation); |
870 | } |
871 | |
872 | static int ocfs2_journal_toggle_dirty(struct ocfs2_super *osb, |
873 | int dirty, int replayed) |
874 | { |
875 | int status; |
876 | unsigned int flags; |
877 | struct ocfs2_journal *journal = osb->journal; |
878 | struct buffer_head *bh = journal->j_bh; |
879 | struct ocfs2_dinode *fe; |
880 | |
881 | mlog_entry_void(); |
882 | |
883 | fe = (struct ocfs2_dinode *)bh->b_data; |
884 | |
885 | /* The journal bh on the osb always comes from ocfs2_journal_init() |
886 | * and was validated there inside ocfs2_inode_lock_full(). It's a |
887 | * code bug if we mess it up. */ |
888 | BUG_ON(!OCFS2_IS_VALID_DINODE(fe)); |
889 | |
890 | flags = le32_to_cpu(fe->id1.journal1.ij_flags); |
891 | if (dirty) |
892 | flags |= OCFS2_JOURNAL_DIRTY_FL; |
893 | else |
894 | flags &= ~OCFS2_JOURNAL_DIRTY_FL; |
895 | fe->id1.journal1.ij_flags = cpu_to_le32(flags); |
896 | |
897 | if (replayed) |
898 | ocfs2_bump_recovery_generation(fe); |
899 | |
900 | ocfs2_compute_meta_ecc(osb->sb, bh->b_data, &fe->i_check); |
901 | status = ocfs2_write_block(osb, bh, journal->j_inode); |
902 | if (status < 0) |
903 | mlog_errno(status); |
904 | |
905 | mlog_exit(status); |
906 | return status; |
907 | } |
908 | |
909 | /* |
910 | * If the journal has been kmalloc'd it needs to be freed after this |
911 | * call. |
912 | */ |
913 | void ocfs2_journal_shutdown(struct ocfs2_super *osb) |
914 | { |
915 | struct ocfs2_journal *journal = NULL; |
916 | int status = 0; |
917 | struct inode *inode = NULL; |
918 | int num_running_trans = 0; |
919 | |
920 | mlog_entry_void(); |
921 | |
922 | BUG_ON(!osb); |
923 | |
924 | journal = osb->journal; |
925 | if (!journal) |
926 | goto done; |
927 | |
928 | inode = journal->j_inode; |
929 | |
930 | if (journal->j_state != OCFS2_JOURNAL_LOADED) |
931 | goto done; |
932 | |
933 | /* need to inc inode use count - jbd2_journal_destroy will iput. */ |
934 | if (!igrab(inode)) |
935 | BUG(); |
936 | |
937 | num_running_trans = atomic_read(&(osb->journal->j_num_trans)); |
938 | if (num_running_trans > 0) |
939 | mlog(0, "Shutting down journal: must wait on %d " |
940 | "running transactions!\n", |
941 | num_running_trans); |
942 | |
943 | /* Do a commit_cache here. It will flush our journal, *and* |
944 | * release any locks that are still held. |
945 | * set the SHUTDOWN flag and release the trans lock. |
946 | * the commit thread will take the trans lock for us below. */ |
947 | journal->j_state = OCFS2_JOURNAL_IN_SHUTDOWN; |
948 | |
949 | /* The OCFS2_JOURNAL_IN_SHUTDOWN will signal commit_cache not to |
950 | * drop the trans_lock (which we want to hold until we |
951 | * completely destroy the journal). */ |
952 | if (osb->commit_task) { |
953 | /* Wait for the commit thread */ |
954 | mlog(0, "Waiting for ocfs2commit to exit....\n"); |
955 | kthread_stop(osb->commit_task); |
956 | osb->commit_task = NULL; |
957 | } |
958 | |
959 | BUG_ON(atomic_read(&(osb->journal->j_num_trans)) != 0); |
960 | |
961 | if (ocfs2_mount_local(osb)) { |
962 | jbd2_journal_lock_updates(journal->j_journal); |
963 | status = jbd2_journal_flush(journal->j_journal); |
964 | jbd2_journal_unlock_updates(journal->j_journal); |
965 | if (status < 0) |
966 | mlog_errno(status); |
967 | } |
968 | |
969 | if (status == 0) { |
970 | /* |
971 | * Do not toggle if flush was unsuccessful otherwise |
972 | * will leave dirty metadata in a "clean" journal |
973 | */ |
974 | status = ocfs2_journal_toggle_dirty(osb, 0, 0); |
975 | if (status < 0) |
976 | mlog_errno(status); |
977 | } |
978 | |
979 | /* Shutdown the kernel journal system */ |
980 | jbd2_journal_destroy(journal->j_journal); |
981 | journal->j_journal = NULL; |
982 | |
983 | OCFS2_I(inode)->ip_open_count--; |
984 | |
985 | /* unlock our journal */ |
986 | ocfs2_inode_unlock(inode, 1); |
987 | |
988 | brelse(journal->j_bh); |
989 | journal->j_bh = NULL; |
990 | |
991 | journal->j_state = OCFS2_JOURNAL_FREE; |
992 | |
993 | // up_write(&journal->j_trans_barrier); |
994 | done: |
995 | if (inode) |
996 | iput(inode); |
997 | mlog_exit_void(); |
998 | } |
999 | |
1000 | static void ocfs2_clear_journal_error(struct super_block *sb, |
1001 | journal_t *journal, |
1002 | int slot) |
1003 | { |
1004 | int olderr; |
1005 | |
1006 | olderr = jbd2_journal_errno(journal); |
1007 | if (olderr) { |
1008 | mlog(ML_ERROR, "File system error %d recorded in " |
1009 | "journal %u.\n", olderr, slot); |
1010 | mlog(ML_ERROR, "File system on device %s needs checking.\n", |
1011 | sb->s_id); |
1012 | |
1013 | jbd2_journal_ack_err(journal); |
1014 | jbd2_journal_clear_err(journal); |
1015 | } |
1016 | } |
1017 | |
1018 | int ocfs2_journal_load(struct ocfs2_journal *journal, int local, int replayed) |
1019 | { |
1020 | int status = 0; |
1021 | struct ocfs2_super *osb; |
1022 | |
1023 | mlog_entry_void(); |
1024 | |
1025 | BUG_ON(!journal); |
1026 | |
1027 | osb = journal->j_osb; |
1028 | |
1029 | status = jbd2_journal_load(journal->j_journal); |
1030 | if (status < 0) { |
1031 | mlog(ML_ERROR, "Failed to load journal!\n"); |
1032 | goto done; |
1033 | } |
1034 | |
1035 | ocfs2_clear_journal_error(osb->sb, journal->j_journal, osb->slot_num); |
1036 | |
1037 | status = ocfs2_journal_toggle_dirty(osb, 1, replayed); |
1038 | if (status < 0) { |
1039 | mlog_errno(status); |
1040 | goto done; |
1041 | } |
1042 | |
1043 | /* Launch the commit thread */ |
1044 | if (!local) { |
1045 | osb->commit_task = kthread_run(ocfs2_commit_thread, osb, |
1046 | "ocfs2cmt"); |
1047 | if (IS_ERR(osb->commit_task)) { |
1048 | status = PTR_ERR(osb->commit_task); |
1049 | osb->commit_task = NULL; |
1050 | mlog(ML_ERROR, "unable to launch ocfs2commit thread, " |
1051 | "error=%d", status); |
1052 | goto done; |
1053 | } |
1054 | } else |
1055 | osb->commit_task = NULL; |
1056 | |
1057 | done: |
1058 | mlog_exit(status); |
1059 | return status; |
1060 | } |
1061 | |
1062 | |
1063 | /* 'full' flag tells us whether we clear out all blocks or if we just |
1064 | * mark the journal clean */ |
1065 | int ocfs2_journal_wipe(struct ocfs2_journal *journal, int full) |
1066 | { |
1067 | int status; |
1068 | |
1069 | mlog_entry_void(); |
1070 | |
1071 | BUG_ON(!journal); |
1072 | |
1073 | status = jbd2_journal_wipe(journal->j_journal, full); |
1074 | if (status < 0) { |
1075 | mlog_errno(status); |
1076 | goto bail; |
1077 | } |
1078 | |
1079 | status = ocfs2_journal_toggle_dirty(journal->j_osb, 0, 0); |
1080 | if (status < 0) |
1081 | mlog_errno(status); |
1082 | |
1083 | bail: |
1084 | mlog_exit(status); |
1085 | return status; |
1086 | } |
1087 | |
1088 | static int ocfs2_recovery_completed(struct ocfs2_super *osb) |
1089 | { |
1090 | int empty; |
1091 | struct ocfs2_recovery_map *rm = osb->recovery_map; |
1092 | |
1093 | spin_lock(&osb->osb_lock); |
1094 | empty = (rm->rm_used == 0); |
1095 | spin_unlock(&osb->osb_lock); |
1096 | |
1097 | return empty; |
1098 | } |
1099 | |
1100 | void ocfs2_wait_for_recovery(struct ocfs2_super *osb) |
1101 | { |
1102 | wait_event(osb->recovery_event, ocfs2_recovery_completed(osb)); |
1103 | } |
1104 | |
1105 | /* |
1106 | * JBD2 might read a cached version of another node's journal file. We |
1107 | * don't want this as this file changes often and we get no |
1108 | * notification on those changes. The only way to be sure that we've |
1109 | * got the most up to date version of those blocks then is to force |
1110 | * read them off disk. Just searching through the buffer cache won't |
1111 | * work as there may be pages backing this file which are still marked |
1112 | * up to date. We know things can't change on this file underneath us |
1113 | * as we have the lock by now :) |
1114 | */ |
1115 | static int ocfs2_force_read_journal(struct inode *inode) |
1116 | { |
1117 | int status = 0; |
1118 | int i; |
1119 | u64 v_blkno, p_blkno, p_blocks, num_blocks; |
1120 | #define CONCURRENT_JOURNAL_FILL 32ULL |
1121 | struct buffer_head *bhs[CONCURRENT_JOURNAL_FILL]; |
1122 | |
1123 | mlog_entry_void(); |
1124 | |
1125 | memset(bhs, 0, sizeof(struct buffer_head *) * CONCURRENT_JOURNAL_FILL); |
1126 | |
1127 | num_blocks = ocfs2_blocks_for_bytes(inode->i_sb, inode->i_size); |
1128 | v_blkno = 0; |
1129 | while (v_blkno < num_blocks) { |
1130 | status = ocfs2_extent_map_get_blocks(inode, v_blkno, |
1131 | &p_blkno, &p_blocks, NULL); |
1132 | if (status < 0) { |
1133 | mlog_errno(status); |
1134 | goto bail; |
1135 | } |
1136 | |
1137 | if (p_blocks > CONCURRENT_JOURNAL_FILL) |
1138 | p_blocks = CONCURRENT_JOURNAL_FILL; |
1139 | |
1140 | /* We are reading journal data which should not |
1141 | * be put in the uptodate cache */ |
1142 | status = ocfs2_read_blocks_sync(OCFS2_SB(inode->i_sb), |
1143 | p_blkno, p_blocks, bhs); |
1144 | if (status < 0) { |
1145 | mlog_errno(status); |
1146 | goto bail; |
1147 | } |
1148 | |
1149 | for(i = 0; i < p_blocks; i++) { |
1150 | brelse(bhs[i]); |
1151 | bhs[i] = NULL; |
1152 | } |
1153 | |
1154 | v_blkno += p_blocks; |
1155 | } |
1156 | |
1157 | bail: |
1158 | for(i = 0; i < CONCURRENT_JOURNAL_FILL; i++) |
1159 | brelse(bhs[i]); |
1160 | mlog_exit(status); |
1161 | return status; |
1162 | } |
1163 | |
1164 | struct ocfs2_la_recovery_item { |
1165 | struct list_head lri_list; |
1166 | int lri_slot; |
1167 | struct ocfs2_dinode *lri_la_dinode; |
1168 | struct ocfs2_dinode *lri_tl_dinode; |
1169 | struct ocfs2_quota_recovery *lri_qrec; |
1170 | }; |
1171 | |
1172 | /* Does the second half of the recovery process. By this point, the |
1173 | * node is marked clean and can actually be considered recovered, |
1174 | * hence it's no longer in the recovery map, but there's still some |
1175 | * cleanup we can do which shouldn't happen within the recovery thread |
1176 | * as locking in that context becomes very difficult if we are to take |
1177 | * recovering nodes into account. |
1178 | * |
1179 | * NOTE: This function can and will sleep on recovery of other nodes |
1180 | * during cluster locking, just like any other ocfs2 process. |
1181 | */ |
1182 | void ocfs2_complete_recovery(struct work_struct *work) |
1183 | { |
1184 | int ret; |
1185 | struct ocfs2_journal *journal = |
1186 | container_of(work, struct ocfs2_journal, j_recovery_work); |
1187 | struct ocfs2_super *osb = journal->j_osb; |
1188 | struct ocfs2_dinode *la_dinode, *tl_dinode; |
1189 | struct ocfs2_la_recovery_item *item, *n; |
1190 | struct ocfs2_quota_recovery *qrec; |
1191 | LIST_HEAD(tmp_la_list); |
1192 | |
1193 | mlog_entry_void(); |
1194 | |
1195 | mlog(0, "completing recovery from keventd\n"); |
1196 | |
1197 | spin_lock(&journal->j_lock); |
1198 | list_splice_init(&journal->j_la_cleanups, &tmp_la_list); |
1199 | spin_unlock(&journal->j_lock); |
1200 | |
1201 | list_for_each_entry_safe(item, n, &tmp_la_list, lri_list) { |
1202 | list_del_init(&item->lri_list); |
1203 | |
1204 | mlog(0, "Complete recovery for slot %d\n", item->lri_slot); |
1205 | |
1206 | ocfs2_wait_on_quotas(osb); |
1207 | |
1208 | la_dinode = item->lri_la_dinode; |
1209 | if (la_dinode) { |
1210 | mlog(0, "Clean up local alloc %llu\n", |
1211 | (unsigned long long)le64_to_cpu(la_dinode->i_blkno)); |
1212 | |
1213 | ret = ocfs2_complete_local_alloc_recovery(osb, |
1214 | la_dinode); |
1215 | if (ret < 0) |
1216 | mlog_errno(ret); |
1217 | |
1218 | kfree(la_dinode); |
1219 | } |
1220 | |
1221 | tl_dinode = item->lri_tl_dinode; |
1222 | if (tl_dinode) { |
1223 | mlog(0, "Clean up truncate log %llu\n", |
1224 | (unsigned long long)le64_to_cpu(tl_dinode->i_blkno)); |
1225 | |
1226 | ret = ocfs2_complete_truncate_log_recovery(osb, |
1227 | tl_dinode); |
1228 | if (ret < 0) |
1229 | mlog_errno(ret); |
1230 | |
1231 | kfree(tl_dinode); |
1232 | } |
1233 | |
1234 | ret = ocfs2_recover_orphans(osb, item->lri_slot); |
1235 | if (ret < 0) |
1236 | mlog_errno(ret); |
1237 | |
1238 | qrec = item->lri_qrec; |
1239 | if (qrec) { |
1240 | mlog(0, "Recovering quota files"); |
1241 | ret = ocfs2_finish_quota_recovery(osb, qrec, |
1242 | item->lri_slot); |
1243 | if (ret < 0) |
1244 | mlog_errno(ret); |
1245 | /* Recovery info is already freed now */ |
1246 | } |
1247 | |
1248 | kfree(item); |
1249 | } |
1250 | |
1251 | mlog(0, "Recovery completion\n"); |
1252 | mlog_exit_void(); |
1253 | } |
1254 | |
1255 | /* NOTE: This function always eats your references to la_dinode and |
1256 | * tl_dinode, either manually on error, or by passing them to |
1257 | * ocfs2_complete_recovery */ |
1258 | static void ocfs2_queue_recovery_completion(struct ocfs2_journal *journal, |
1259 | int slot_num, |
1260 | struct ocfs2_dinode *la_dinode, |
1261 | struct ocfs2_dinode *tl_dinode, |
1262 | struct ocfs2_quota_recovery *qrec) |
1263 | { |
1264 | struct ocfs2_la_recovery_item *item; |
1265 | |
1266 | item = kmalloc(sizeof(struct ocfs2_la_recovery_item), GFP_NOFS); |
1267 | if (!item) { |
1268 | /* Though we wish to avoid it, we are in fact safe in |
1269 | * skipping local alloc cleanup as fsck.ocfs2 is more |
1270 | * than capable of reclaiming unused space. */ |
1271 | if (la_dinode) |
1272 | kfree(la_dinode); |
1273 | |
1274 | if (tl_dinode) |
1275 | kfree(tl_dinode); |
1276 | |
1277 | if (qrec) |
1278 | ocfs2_free_quota_recovery(qrec); |
1279 | |
1280 | mlog_errno(-ENOMEM); |
1281 | return; |
1282 | } |
1283 | |
1284 | INIT_LIST_HEAD(&item->lri_list); |
1285 | item->lri_la_dinode = la_dinode; |
1286 | item->lri_slot = slot_num; |
1287 | item->lri_tl_dinode = tl_dinode; |
1288 | item->lri_qrec = qrec; |
1289 | |
1290 | spin_lock(&journal->j_lock); |
1291 | list_add_tail(&item->lri_list, &journal->j_la_cleanups); |
1292 | queue_work(ocfs2_wq, &journal->j_recovery_work); |
1293 | spin_unlock(&journal->j_lock); |
1294 | } |
1295 | |
1296 | /* Called by the mount code to queue the last part of recovery for its |
1297 | * own slot and for any offline slot(s). */ |
1298 | void ocfs2_complete_mount_recovery(struct ocfs2_super *osb) |
1299 | { |
1300 | struct ocfs2_journal *journal = osb->journal; |
1301 | |
1302 | /* No need to queue up our truncate_log as regular cleanup will catch |
1303 | * that */ |
1304 | ocfs2_queue_recovery_completion(journal, osb->slot_num, |
1305 | osb->local_alloc_copy, NULL, NULL); |
1306 | ocfs2_schedule_truncate_log_flush(osb, 0); |
1307 | |
1308 | osb->local_alloc_copy = NULL; |
1309 | osb->dirty = 0; |
1310 | |
1311 | /* queue to recover orphan slots for all offline slots */ |
1312 | ocfs2_replay_map_set_state(osb, REPLAY_NEEDED); |
1313 | ocfs2_queue_replay_slots(osb); |
1314 | ocfs2_free_replay_slots(osb); |
1315 | } |
1316 | |
1317 | void ocfs2_complete_quota_recovery(struct ocfs2_super *osb) |
1318 | { |
1319 | if (osb->quota_rec) { |
1320 | ocfs2_queue_recovery_completion(osb->journal, |
1321 | osb->slot_num, |
1322 | NULL, |
1323 | NULL, |
1324 | osb->quota_rec); |
1325 | osb->quota_rec = NULL; |
1326 | } |
1327 | } |
1328 | |
1329 | static int __ocfs2_recovery_thread(void *arg) |
1330 | { |
1331 | int status, node_num, slot_num; |
1332 | struct ocfs2_super *osb = arg; |
1333 | struct ocfs2_recovery_map *rm = osb->recovery_map; |
1334 | int *rm_quota = NULL; |
1335 | int rm_quota_used = 0, i; |
1336 | struct ocfs2_quota_recovery *qrec; |
1337 | |
1338 | mlog_entry_void(); |
1339 | |
1340 | status = ocfs2_wait_on_mount(osb); |
1341 | if (status < 0) { |
1342 | goto bail; |
1343 | } |
1344 | |
1345 | rm_quota = kzalloc(osb->max_slots * sizeof(int), GFP_NOFS); |
1346 | if (!rm_quota) { |
1347 | status = -ENOMEM; |
1348 | goto bail; |
1349 | } |
1350 | restart: |
1351 | status = ocfs2_super_lock(osb, 1); |
1352 | if (status < 0) { |
1353 | mlog_errno(status); |
1354 | goto bail; |
1355 | } |
1356 | |
1357 | status = ocfs2_compute_replay_slots(osb); |
1358 | if (status < 0) |
1359 | mlog_errno(status); |
1360 | |
1361 | /* queue recovery for our own slot */ |
1362 | ocfs2_queue_recovery_completion(osb->journal, osb->slot_num, NULL, |
1363 | NULL, NULL); |
1364 | |
1365 | spin_lock(&osb->osb_lock); |
1366 | while (rm->rm_used) { |
1367 | /* It's always safe to remove entry zero, as we won't |
1368 | * clear it until ocfs2_recover_node() has succeeded. */ |
1369 | node_num = rm->rm_entries[0]; |
1370 | spin_unlock(&osb->osb_lock); |
1371 | mlog(0, "checking node %d\n", node_num); |
1372 | slot_num = ocfs2_node_num_to_slot(osb, node_num); |
1373 | if (slot_num == -ENOENT) { |
1374 | status = 0; |
1375 | mlog(0, "no slot for this node, so no recovery" |
1376 | "required.\n"); |
1377 | goto skip_recovery; |
1378 | } |
1379 | mlog(0, "node %d was using slot %d\n", node_num, slot_num); |
1380 | |
1381 | /* It is a bit subtle with quota recovery. We cannot do it |
1382 | * immediately because we have to obtain cluster locks from |
1383 | * quota files and we also don't want to just skip it because |
1384 | * then quota usage would be out of sync until some node takes |
1385 | * the slot. So we remember which nodes need quota recovery |
1386 | * and when everything else is done, we recover quotas. */ |
1387 | for (i = 0; i < rm_quota_used && rm_quota[i] != slot_num; i++); |
1388 | if (i == rm_quota_used) |
1389 | rm_quota[rm_quota_used++] = slot_num; |
1390 | |
1391 | status = ocfs2_recover_node(osb, node_num, slot_num); |
1392 | skip_recovery: |
1393 | if (!status) { |
1394 | ocfs2_recovery_map_clear(osb, node_num); |
1395 | } else { |
1396 | mlog(ML_ERROR, |
1397 | "Error %d recovering node %d on device (%u,%u)!\n", |
1398 | status, node_num, |
1399 | MAJOR(osb->sb->s_dev), MINOR(osb->sb->s_dev)); |
1400 | mlog(ML_ERROR, "Volume requires unmount.\n"); |
1401 | } |
1402 | |
1403 | spin_lock(&osb->osb_lock); |
1404 | } |
1405 | spin_unlock(&osb->osb_lock); |
1406 | mlog(0, "All nodes recovered\n"); |
1407 | |
1408 | /* Refresh all journal recovery generations from disk */ |
1409 | status = ocfs2_check_journals_nolocks(osb); |
1410 | status = (status == -EROFS) ? 0 : status; |
1411 | if (status < 0) |
1412 | mlog_errno(status); |
1413 | |
1414 | /* Now it is right time to recover quotas... We have to do this under |
1415 | * superblock lock so that no one can start using the slot (and crash) |
1416 | * before we recover it */ |
1417 | for (i = 0; i < rm_quota_used; i++) { |
1418 | qrec = ocfs2_begin_quota_recovery(osb, rm_quota[i]); |
1419 | if (IS_ERR(qrec)) { |
1420 | status = PTR_ERR(qrec); |
1421 | mlog_errno(status); |
1422 | continue; |
1423 | } |
1424 | ocfs2_queue_recovery_completion(osb->journal, rm_quota[i], |
1425 | NULL, NULL, qrec); |
1426 | } |
1427 | |
1428 | ocfs2_super_unlock(osb, 1); |
1429 | |
1430 | /* queue recovery for offline slots */ |
1431 | ocfs2_queue_replay_slots(osb); |
1432 | |
1433 | bail: |
1434 | mutex_lock(&osb->recovery_lock); |
1435 | if (!status && !ocfs2_recovery_completed(osb)) { |
1436 | mutex_unlock(&osb->recovery_lock); |
1437 | goto restart; |
1438 | } |
1439 | |
1440 | ocfs2_free_replay_slots(osb); |
1441 | osb->recovery_thread_task = NULL; |
1442 | mb(); /* sync with ocfs2_recovery_thread_running */ |
1443 | wake_up(&osb->recovery_event); |
1444 | |
1445 | mutex_unlock(&osb->recovery_lock); |
1446 | |
1447 | if (rm_quota) |
1448 | kfree(rm_quota); |
1449 | |
1450 | mlog_exit(status); |
1451 | /* no one is calling kthread_stop() for us so the kthread() API |
1452 | * requires that we call do_exit(). And it isn't exported, but |
1453 | * complete_and_exit() seems to be a minimal wrapper around it. */ |
1454 | complete_and_exit(NULL, status); |
1455 | return status; |
1456 | } |
1457 | |
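/*
 * Entry point for node recovery, roughly: when a node is reported dead,
 * the caller invokes ocfs2_recovery_thread() below, which marks the node
 * in the recovery map and spawns __ocfs2_recovery_thread(). That thread
 * replays the dead node's journal via ocfs2_recover_node() and queues the
 * remaining cleanup for ocfs2_complete_recovery() to run from the
 * workqueue.
 */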
1458 | void ocfs2_recovery_thread(struct ocfs2_super *osb, int node_num) |
1459 | { |
1460 | mlog_entry("(node_num=%d, osb->node_num = %d)\n", |
1461 | node_num, osb->node_num); |
1462 | |
1463 | mutex_lock(&osb->recovery_lock); |
1464 | if (osb->disable_recovery) |
1465 | goto out; |
1466 | |
1467 | /* People waiting on recovery will wait on |
1468 | * the recovery map to empty. */ |
1469 | if (ocfs2_recovery_map_set(osb, node_num)) |
1470 | mlog(0, "node %d already in recovery map.\n", node_num); |
1471 | |
1472 | mlog(0, "starting recovery thread...\n"); |
1473 | |
1474 | if (osb->recovery_thread_task) |
1475 | goto out; |
1476 | |
1477 | osb->recovery_thread_task = kthread_run(__ocfs2_recovery_thread, osb, |
1478 | "ocfs2rec"); |
1479 | if (IS_ERR(osb->recovery_thread_task)) { |
1480 | mlog_errno((int)PTR_ERR(osb->recovery_thread_task)); |
1481 | osb->recovery_thread_task = NULL; |
1482 | } |
1483 | |
1484 | out: |
1485 | mutex_unlock(&osb->recovery_lock); |
1486 | wake_up(&osb->recovery_event); |
1487 | |
1488 | mlog_exit_void(); |
1489 | } |
1490 | |
1491 | static int ocfs2_read_journal_inode(struct ocfs2_super *osb, |
1492 | int slot_num, |
1493 | struct buffer_head **bh, |
1494 | struct inode **ret_inode) |
1495 | { |
1496 | int status = -EACCES; |
1497 | struct inode *inode = NULL; |
1498 | |
1499 | BUG_ON(slot_num >= osb->max_slots); |
1500 | |
1501 | inode = ocfs2_get_system_file_inode(osb, JOURNAL_SYSTEM_INODE, |
1502 | slot_num); |
1503 | if (!inode || is_bad_inode(inode)) { |
1504 | mlog_errno(status); |
1505 | goto bail; |
1506 | } |
1507 | SET_INODE_JOURNAL(inode); |
1508 | |
1509 | status = ocfs2_read_inode_block_full(inode, bh, OCFS2_BH_IGNORE_CACHE); |
1510 | if (status < 0) { |
1511 | mlog_errno(status); |
1512 | goto bail; |
1513 | } |
1514 | |
1515 | status = 0; |
1516 | |
1517 | bail: |
1518 | if (inode) { |
1519 | if (status || !ret_inode) |
1520 | iput(inode); |
1521 | else |
1522 | *ret_inode = inode; |
1523 | } |
1524 | return status; |
1525 | } |
1526 | |
1527 | /* Does the actual journal replay and marks the journal inode as |
1528 | * clean. Will only replay if the journal inode is marked dirty. */ |
1529 | static int ocfs2_replay_journal(struct ocfs2_super *osb, |
1530 | int node_num, |
1531 | int slot_num) |
1532 | { |
1533 | int status; |
1534 | int got_lock = 0; |
1535 | unsigned int flags; |
1536 | struct inode *inode = NULL; |
1537 | struct ocfs2_dinode *fe; |
1538 | journal_t *journal = NULL; |
1539 | struct buffer_head *bh = NULL; |
1540 | u32 slot_reco_gen; |
1541 | |
1542 | status = ocfs2_read_journal_inode(osb, slot_num, &bh, &inode); |
1543 | if (status) { |
1544 | mlog_errno(status); |
1545 | goto done; |
1546 | } |
1547 | |
1548 | fe = (struct ocfs2_dinode *)bh->b_data; |
1549 | slot_reco_gen = ocfs2_get_recovery_generation(fe); |
1550 | brelse(bh); |
1551 | bh = NULL; |
1552 | |
1553 | /* |
1554 | * As the fs recovery is asynchronous, there is a small chance that |
1555 | * another node mounted (and recovered) the slot before the recovery |
1556 | * thread could get the lock. To handle that, we dirty read the journal |
1557 | * inode for that slot to get the recovery generation. If it is |
1558 | * different than what we expected, the slot has been recovered. |
1559 | * If not, it needs recovery. |
1560 | */ |
1561 | if (osb->slot_recovery_generations[slot_num] != slot_reco_gen) { |
1562 | mlog(0, "Slot %u already recovered (old/new=%u/%u)\n", slot_num, |
1563 | osb->slot_recovery_generations[slot_num], slot_reco_gen); |
1564 | osb->slot_recovery_generations[slot_num] = slot_reco_gen; |
1565 | status = -EBUSY; |
1566 | goto done; |
1567 | } |
1568 | |
1569 | /* Continue with recovery as the journal has not yet been recovered */ |
1570 | |
1571 | status = ocfs2_inode_lock_full(inode, &bh, 1, OCFS2_META_LOCK_RECOVERY); |
1572 | if (status < 0) { |
1573 | mlog(0, "status returned from ocfs2_inode_lock=%d\n", status); |
1574 | if (status != -ERESTARTSYS) |
1575 | mlog(ML_ERROR, "Could not lock journal!\n"); |
1576 | goto done; |
1577 | } |
1578 | got_lock = 1; |
1579 | |
1580 | fe = (struct ocfs2_dinode *) bh->b_data; |
1581 | |
1582 | flags = le32_to_cpu(fe->id1.journal1.ij_flags); |
1583 | slot_reco_gen = ocfs2_get_recovery_generation(fe); |
1584 | |
1585 | if (!(flags & OCFS2_JOURNAL_DIRTY_FL)) { |
1586 | mlog(0, "No recovery required for node %d\n", node_num); |
1587 | /* Refresh recovery generation for the slot */ |
1588 | osb->slot_recovery_generations[slot_num] = slot_reco_gen; |
1589 | goto done; |
1590 | } |
1591 | |
1592 | /* we need to run complete recovery for offline orphan slots */ |
1593 | ocfs2_replay_map_set_state(osb, REPLAY_NEEDED); |
1594 | |
1595 | mlog(ML_NOTICE, "Recovering node %d from slot %d on device (%u,%u)\n", |
1596 | node_num, slot_num, |
1597 | MAJOR(osb->sb->s_dev), MINOR(osb->sb->s_dev)); |
1598 | |
1599 | OCFS2_I(inode)->ip_clusters = le32_to_cpu(fe->i_clusters); |
1600 | |
1601 | status = ocfs2_force_read_journal(inode); |
1602 | if (status < 0) { |
1603 | mlog_errno(status); |
1604 | goto done; |
1605 | } |
1606 | |
1607 | mlog(0, "calling journal_init_inode\n"); |
1608 | journal = jbd2_journal_init_inode(inode); |
1609 | if (journal == NULL) { |
1610 | mlog(ML_ERROR, "Linux journal layer error\n"); |
1611 | status = -EIO; |
1612 | goto done; |
1613 | } |
1614 | |
1615 | status = jbd2_journal_load(journal); |
1616 | if (status < 0) { |
1617 | mlog_errno(status); |
1618 | if (!igrab(inode)) |
1619 | BUG(); |
1620 | jbd2_journal_destroy(journal); |
1621 | goto done; |
1622 | } |
1623 | |
1624 | ocfs2_clear_journal_error(osb->sb, journal, slot_num); |
1625 | |
1626 | /* wipe the journal */ |
1627 | mlog(0, "flushing the journal.\n"); |
1628 | jbd2_journal_lock_updates(journal); |
1629 | status = jbd2_journal_flush(journal); |
1630 | jbd2_journal_unlock_updates(journal); |
1631 | if (status < 0) |
1632 | mlog_errno(status); |
1633 | |
1634 | /* This will mark the node clean */ |
1635 | flags = le32_to_cpu(fe->id1.journal1.ij_flags); |
1636 | flags &= ~OCFS2_JOURNAL_DIRTY_FL; |
1637 | fe->id1.journal1.ij_flags = cpu_to_le32(flags); |
1638 | |
1639 | /* Increment recovery generation to indicate successful recovery */ |
1640 | ocfs2_bump_recovery_generation(fe); |
1641 | osb->slot_recovery_generations[slot_num] = |
1642 | ocfs2_get_recovery_generation(fe); |
1643 | |
1644 | ocfs2_compute_meta_ecc(osb->sb, bh->b_data, &fe->i_check); |
1645 | status = ocfs2_write_block(osb, bh, inode); |
1646 | if (status < 0) |
1647 | mlog_errno(status); |
1648 | |
1649 | if (!igrab(inode)) |
1650 | BUG(); |
1651 | |
1652 | jbd2_journal_destroy(journal); |
1653 | |
1654 | done: |
1655 | /* drop the lock on this node's journal */ |
1656 | if (got_lock) |
1657 | ocfs2_inode_unlock(inode, 1); |
1658 | |
1659 | if (inode) |
1660 | iput(inode); |
1661 | |
1662 | brelse(bh); |
1663 | |
1664 | mlog_exit(status); |
1665 | return status; |
1666 | } |
1667 | |
1668 | /* |
1669 | * Do the most important parts of node recovery: |
1670 | * - Replay its journal |
1671 | * - Stamp a clean local allocator file |
1672 | * - Stamp a clean truncate log |
1673 | * - Mark the node clean |
1674 | * |
1675 | * If this function completes without error, a node in OCFS2 can be |
1676 | * said to have been safely recovered. As a result, failure during the |
1677 | * second part of a node's recovery process (local alloc recovery) is |
1678 | * far less concerning. |
1679 | */ |
1680 | static int ocfs2_recover_node(struct ocfs2_super *osb, |
1681 | int node_num, int slot_num) |
1682 | { |
1683 | int status = 0; |
1684 | struct ocfs2_dinode *la_copy = NULL; |
1685 | struct ocfs2_dinode *tl_copy = NULL; |
1686 | |
1687 | mlog_entry("(node_num=%d, slot_num=%d, osb->node_num = %d)\n", |
1688 | node_num, slot_num, osb->node_num); |
1689 | |
1690 | /* Should not ever be called to recover ourselves -- in that |
1691 | * case we should've called ocfs2_journal_load instead. */ |
1692 | BUG_ON(osb->node_num == node_num); |
1693 | |
1694 | status = ocfs2_replay_journal(osb, node_num, slot_num); |
1695 | if (status < 0) { |
1696 | if (status == -EBUSY) { |
1697 | mlog(0, "Skipping recovery for slot %u (node %u) " |
1698 | "as another node has recovered it\n", slot_num, |
1699 | node_num); |
1700 | status = 0; |
1701 | goto done; |
1702 | } |
1703 | mlog_errno(status); |
1704 | goto done; |
1705 | } |
1706 | |
1707 | /* Stamp a clean local alloc file AFTER recovering the journal... */ |
1708 | status = ocfs2_begin_local_alloc_recovery(osb, slot_num, &la_copy); |
1709 | if (status < 0) { |
1710 | mlog_errno(status); |
1711 | goto done; |
1712 | } |
1713 | |
1714 | /* An error from begin_truncate_log_recovery is not |
1715 | * serious enough to warrant halting the rest of |
1716 | * recovery. */ |
1717 | status = ocfs2_begin_truncate_log_recovery(osb, slot_num, &tl_copy); |
1718 | if (status < 0) |
1719 | mlog_errno(status); |
1720 | |
1721 | /* Likewise, this would be a strange but ultimately not so |
1722 | * harmful place to get an error... */ |
1723 | status = ocfs2_clear_slot(osb, slot_num); |
1724 | if (status < 0) |
1725 | mlog_errno(status); |
1726 | |
1727 | /* This will kfree the memory pointed to by la_copy and tl_copy */ |
1728 | ocfs2_queue_recovery_completion(osb->journal, slot_num, la_copy, |
1729 | tl_copy, NULL); |
1730 | |
1731 | status = 0; |
1732 | done: |
1733 | |
1734 | mlog_exit(status); |
1735 | return status; |
1736 | } |
1737 | |
1738 | /* Test node liveness by trylocking its journal. If we get the lock, |
1739 | * we drop it here. Return 0 if we got the lock, -EAGAIN if the node is |
1740 | * still alive (we couldn't get the lock), and < 0 on error. */ |
1741 | static int ocfs2_trylock_journal(struct ocfs2_super *osb, |
1742 | int slot_num) |
1743 | { |
1744 | int status, flags; |
1745 | struct inode *inode = NULL; |
1746 | |
1747 | inode = ocfs2_get_system_file_inode(osb, JOURNAL_SYSTEM_INODE, |
1748 | slot_num); |
1749 | if (inode == NULL) { |
1750 | mlog(ML_ERROR, "access error\n"); |
1751 | status = -EACCES; |
1752 | goto bail; |
1753 | } |
1754 | if (is_bad_inode(inode)) { |
1755 | mlog(ML_ERROR, "access error (bad inode)\n"); |
1756 | iput(inode); |
1757 | inode = NULL; |
1758 | status = -EACCES; |
1759 | goto bail; |
1760 | } |
1761 | SET_INODE_JOURNAL(inode); |
1762 | |
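	/* NOQUEUE makes the lock request fail with -EAGAIN instead of
	 * blocking when the slot's owner still holds its journal lock. */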
1763 | flags = OCFS2_META_LOCK_RECOVERY | OCFS2_META_LOCK_NOQUEUE; |
1764 | status = ocfs2_inode_lock_full(inode, NULL, 1, flags); |
1765 | if (status < 0) { |
1766 | if (status != -EAGAIN) |
1767 | mlog_errno(status); |
1768 | goto bail; |
1769 | } |
1770 | |
1771 | ocfs2_inode_unlock(inode, 1); |
1772 | bail: |
1773 | if (inode) |
1774 | iput(inode); |
1775 | |
1776 | return status; |
1777 | } |
1778 | |
1779 | /* Call this underneath ocfs2_super_lock. It also assumes that the |
1780 | * slot info struct has been updated from disk. */ |
1781 | int ocfs2_mark_dead_nodes(struct ocfs2_super *osb) |
1782 | { |
1783 | unsigned int node_num; |
1784 | int status, i; |
1785 | u32 gen; |
1786 | struct buffer_head *bh = NULL; |
1787 | struct ocfs2_dinode *di; |
1788 | |
1789 | /* This is called with the super block cluster lock, so we |
1790 | * know that the slot map can't change underneath us. */ |
1791 | |
1792 | for (i = 0; i < osb->max_slots; i++) { |
1793 | /* Read journal inode to get the recovery generation */ |
1794 | status = ocfs2_read_journal_inode(osb, i, &bh, NULL); |
1795 | if (status) { |
1796 | mlog_errno(status); |
1797 | goto bail; |
1798 | } |
1799 | di = (struct ocfs2_dinode *)bh->b_data; |
1800 | gen = ocfs2_get_recovery_generation(di); |
1801 | brelse(bh); |
1802 | bh = NULL; |
1803 | |
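		/* Cache the recovery generation under osb_lock, then skip
		 * our own slot, unoccupied slots, and nodes already in the
		 * recovery map. */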
1804 | spin_lock(&osb->osb_lock); |
1805 | osb->slot_recovery_generations[i] = gen; |
1806 | |
1807 | mlog(0, "Slot %u recovery generation is %u\n", i, |
1808 | osb->slot_recovery_generations[i]); |
1809 | |
1810 | if (i == osb->slot_num) { |
1811 | spin_unlock(&osb->osb_lock); |
1812 | continue; |
1813 | } |
1814 | |
1815 | status = ocfs2_slot_to_node_num_locked(osb, i, &node_num); |
1816 | if (status == -ENOENT) { |
1817 | spin_unlock(&osb->osb_lock); |
1818 | continue; |
1819 | } |
1820 | |
1821 | if (__ocfs2_recovery_map_test(osb, node_num)) { |
1822 | spin_unlock(&osb->osb_lock); |
1823 | continue; |
1824 | } |
1825 | spin_unlock(&osb->osb_lock); |
1826 | |
1827 | /* Ok, we have a slot occupied by another node which |
1828 | 		 * is not in the recovery map. We trylock its journal |
1829 | 		 * file here to test whether it's alive. */ |
1830 | status = ocfs2_trylock_journal(osb, i); |
1831 | if (!status) { |
1832 | /* Since we're called from mount, we know that |
1833 | * the recovery thread can't race us on |
1834 | * setting / checking the recovery bits. */ |
1835 | ocfs2_recovery_thread(osb, node_num); |
1836 | } else if ((status < 0) && (status != -EAGAIN)) { |
1837 | mlog_errno(status); |
1838 | goto bail; |
1839 | } |
1840 | } |
1841 | |
1842 | status = 0; |
1843 | bail: |
1844 | mlog_exit(status); |
1845 | return status; |
1846 | } |
1847 | |
1848 | /* |
1849 | * The scan timer should fire every ORPHAN_SCAN_SCHEDULE_TIMEOUT milliseconds. |
1850 | * Add some randomness to the timeout to minimize multiple nodes firing the |
1851 | * timer at the same time. |
1852 | */ |
1853 | static inline unsigned long ocfs2_orphan_scan_timeout(void) |
1854 | { |
1855 | unsigned long time; |
1856 | |
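	/* Base timeout plus up to five seconds (time % 5000 ms) of random
	 * jitter so nodes don't all fire their scan timers together. */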
1857 | get_random_bytes(&time, sizeof(time)); |
1858 | time = ORPHAN_SCAN_SCHEDULE_TIMEOUT + (time % 5000); |
1859 | return msecs_to_jiffies(time); |
1860 | } |
1861 | |
1862 | /* |
1863 | * ocfs2_queue_orphan_scan calls ocfs2_queue_recovery_completion for |
1864 | * every slot, queuing a recovery of the slot on the ocfs2_wq thread. This |
1865 | * is done to catch any orphans that are left over in orphan directories. |
1866 | * |
1867 | * ocfs2_queue_orphan_scan gets called every ORPHAN_SCAN_SCHEDULE_TIMEOUT |
1868 | * milliseconds. It takes an EX lock on os_lockres and checks the sequence |
1869 | * number stored in the LVB. If the sequence number has changed, some other |
1870 | * node has done the scan. This node skips the scan and tracks the |
1871 | * sequence number. If the sequence number didn't change, it means a scan |
1872 | * hasn't happened. The node queues a scan and increments the |
1873 | * sequence number in the LVB. |
1874 | */ |
1875 | void ocfs2_queue_orphan_scan(struct ocfs2_super *osb) |
1876 | { |
1877 | struct ocfs2_orphan_scan *os; |
1878 | int status, i; |
1879 | u32 seqno = 0; |
1880 | |
1881 | os = &osb->osb_orphan_scan; |
1882 | |
1883 | if (atomic_read(&os->os_state) == ORPHAN_SCAN_INACTIVE) |
1884 | goto out; |
1885 | |
1886 | status = ocfs2_orphan_scan_lock(osb, &seqno); |
1887 | if (status < 0) { |
1888 | if (status != -EAGAIN) |
1889 | mlog_errno(status); |
1890 | goto out; |
1891 | } |
1892 | |
1893 | 	/* Do not queue the tasks if the volume is being unmounted */ |
1894 | if (atomic_read(&os->os_state) == ORPHAN_SCAN_INACTIVE) |
1895 | goto unlock; |
1896 | |
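	/* The LVB sequence number moved since our last pass: another node
	 * already scanned during this window, so record it and skip. */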
1897 | if (os->os_seqno != seqno) { |
1898 | os->os_seqno = seqno; |
1899 | goto unlock; |
1900 | } |
1901 | |
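	/* No journal replay is needed here; queue only the orphan-dir
	 * portion of recovery completion for every slot. */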
1902 | for (i = 0; i < osb->max_slots; i++) |
1903 | ocfs2_queue_recovery_completion(osb->journal, i, NULL, NULL, |
1904 | NULL); |
1905 | /* |
1906 | * We queued a recovery on orphan slots, increment the sequence |
1907 | 	 * number and update the LVB so other nodes will skip the scan for a while |
1908 | */ |
1909 | seqno++; |
1910 | os->os_count++; |
1911 | os->os_scantime = CURRENT_TIME; |
1912 | unlock: |
1913 | ocfs2_orphan_scan_unlock(osb, seqno); |
1914 | out: |
1915 | return; |
1916 | } |
1917 | |
1918 | /* Worker task that gets fired every ORPHAN_SCAN_SCHEDULE_TIMEOUT milliseconds */ |
1919 | void ocfs2_orphan_scan_work(struct work_struct *work) |
1920 | { |
1921 | struct ocfs2_orphan_scan *os; |
1922 | struct ocfs2_super *osb; |
1923 | |
1924 | os = container_of(work, struct ocfs2_orphan_scan, |
1925 | os_orphan_scan_work.work); |
1926 | osb = os->os_osb; |
1927 | |
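	/* os_lock serializes the scan and the re-arm below against
	 * ocfs2_orphan_scan_stop(); only re-arm while still ACTIVE. */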
1928 | mutex_lock(&os->os_lock); |
1929 | ocfs2_queue_orphan_scan(osb); |
1930 | if (atomic_read(&os->os_state) == ORPHAN_SCAN_ACTIVE) |
1931 | schedule_delayed_work(&os->os_orphan_scan_work, |
1932 | ocfs2_orphan_scan_timeout()); |
1933 | mutex_unlock(&os->os_lock); |
1934 | } |
1935 | |
1936 | void ocfs2_orphan_scan_stop(struct ocfs2_super *osb) |
1937 | { |
1938 | struct ocfs2_orphan_scan *os; |
1939 | |
1940 | os = &osb->osb_orphan_scan; |
1941 | if (atomic_read(&os->os_state) == ORPHAN_SCAN_ACTIVE) { |
1942 | atomic_set(&os->os_state, ORPHAN_SCAN_INACTIVE); |
1943 | mutex_lock(&os->os_lock); |
1944 | cancel_delayed_work(&os->os_orphan_scan_work); |
1945 | mutex_unlock(&os->os_lock); |
1946 | } |
1947 | } |
1948 | |
1949 | void ocfs2_orphan_scan_init(struct ocfs2_super *osb) |
1950 | { |
1951 | struct ocfs2_orphan_scan *os; |
1952 | |
1953 | os = &osb->osb_orphan_scan; |
1954 | os->os_osb = osb; |
1955 | os->os_count = 0; |
1956 | os->os_seqno = 0; |
1957 | mutex_init(&os->os_lock); |
1958 | INIT_DELAYED_WORK(&os->os_orphan_scan_work, ocfs2_orphan_scan_work); |
1959 | } |
1960 | |
1961 | void ocfs2_orphan_scan_start(struct ocfs2_super *osb) |
1962 | { |
1963 | struct ocfs2_orphan_scan *os; |
1964 | |
1965 | os = &osb->osb_orphan_scan; |
1966 | os->os_scantime = CURRENT_TIME; |
1967 | if (ocfs2_is_hard_readonly(osb) || ocfs2_mount_local(osb)) |
1968 | atomic_set(&os->os_state, ORPHAN_SCAN_INACTIVE); |
1969 | else { |
1970 | atomic_set(&os->os_state, ORPHAN_SCAN_ACTIVE); |
1971 | schedule_delayed_work(&os->os_orphan_scan_work, |
1972 | ocfs2_orphan_scan_timeout()); |
1973 | } |
1974 | } |
1975 | |
1976 | struct ocfs2_orphan_filldir_priv { |
1977 | struct inode *head; |
1978 | struct ocfs2_super *osb; |
1979 | }; |
1980 | |
1981 | static int ocfs2_orphan_filldir(void *priv, const char *name, int name_len, |
1982 | loff_t pos, u64 ino, unsigned type) |
1983 | { |
1984 | struct ocfs2_orphan_filldir_priv *p = priv; |
1985 | struct inode *iter; |
1986 | |
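	/* Ignore the "." and ".." entries; every other name in the orphan
	 * dir refers to a candidate orphan inode. */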
1987 | if (name_len == 1 && !strncmp(".", name, 1)) |
1988 | return 0; |
1989 | if (name_len == 2 && !strncmp("..", name, 2)) |
1990 | return 0; |
1991 | |
1992 | /* Skip bad inodes so that recovery can continue */ |
1993 | iter = ocfs2_iget(p->osb, ino, |
1994 | OCFS2_FI_FLAG_ORPHAN_RECOVERY, 0); |
1995 | if (IS_ERR(iter)) |
1996 | return 0; |
1997 | |
1998 | mlog(0, "queue orphan %llu\n", |
1999 | (unsigned long long)OCFS2_I(iter)->ip_blkno); |
2000 | /* No locking is required for the next_orphan queue as there |
2001 | * is only ever a single process doing orphan recovery. */ |
2002 | OCFS2_I(iter)->ip_next_orphan = p->head; |
2003 | p->head = iter; |
2004 | |
2005 | return 0; |
2006 | } |
2007 | |
2008 | static int ocfs2_queue_orphans(struct ocfs2_super *osb, |
2009 | int slot, |
2010 | struct inode **head) |
2011 | { |
2012 | int status; |
2013 | struct inode *orphan_dir_inode = NULL; |
2014 | struct ocfs2_orphan_filldir_priv priv; |
2015 | loff_t pos = 0; |
2016 | |
2017 | priv.osb = osb; |
2018 | priv.head = *head; |
2019 | |
2020 | orphan_dir_inode = ocfs2_get_system_file_inode(osb, |
2021 | ORPHAN_DIR_SYSTEM_INODE, |
2022 | slot); |
2023 | if (!orphan_dir_inode) { |
2024 | status = -ENOENT; |
2025 | mlog_errno(status); |
2026 | return status; |
2027 | } |
2028 | |
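	/* Hold i_mutex plus a shared cluster lock on the orphan dir so the
	 * directory can't change while we build the orphan list. */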
2029 | mutex_lock(&orphan_dir_inode->i_mutex); |
2030 | status = ocfs2_inode_lock(orphan_dir_inode, NULL, 0); |
2031 | if (status < 0) { |
2032 | mlog_errno(status); |
2033 | goto out; |
2034 | } |
2035 | |
2036 | status = ocfs2_dir_foreach(orphan_dir_inode, &pos, &priv, |
2037 | ocfs2_orphan_filldir); |
2038 | if (status) { |
2039 | mlog_errno(status); |
2040 | goto out_cluster; |
2041 | } |
2042 | |
2043 | *head = priv.head; |
2044 | |
2045 | out_cluster: |
2046 | ocfs2_inode_unlock(orphan_dir_inode, 0); |
2047 | out: |
2048 | mutex_unlock(&orphan_dir_inode->i_mutex); |
2049 | iput(orphan_dir_inode); |
2050 | return status; |
2051 | } |
2052 | |
2053 | static int ocfs2_orphan_recovery_can_continue(struct ocfs2_super *osb, |
2054 | int slot) |
2055 | { |
2056 | int ret; |
2057 | |
2058 | spin_lock(&osb->osb_lock); |
2059 | ret = !osb->osb_orphan_wipes[slot]; |
2060 | spin_unlock(&osb->osb_lock); |
2061 | return ret; |
2062 | } |
2063 | |
2064 | static void ocfs2_mark_recovering_orphan_dir(struct ocfs2_super *osb, |
2065 | int slot) |
2066 | { |
2067 | spin_lock(&osb->osb_lock); |
2068 | /* Mark ourselves such that new processes in delete_inode() |
2069 | * know to quit early. */ |
2070 | ocfs2_node_map_set_bit(osb, &osb->osb_recovering_orphan_dirs, slot); |
2071 | while (osb->osb_orphan_wipes[slot]) { |
2072 | /* If any processes are already in the middle of an |
2073 | * orphan wipe on this dir, then we need to wait for |
2074 | * them. */ |
2075 | spin_unlock(&osb->osb_lock); |
2076 | wait_event_interruptible(osb->osb_wipe_event, |
2077 | ocfs2_orphan_recovery_can_continue(osb, slot)); |
2078 | spin_lock(&osb->osb_lock); |
2079 | } |
2080 | spin_unlock(&osb->osb_lock); |
2081 | } |
2082 | |
2083 | static void ocfs2_clear_recovering_orphan_dir(struct ocfs2_super *osb, |
2084 | int slot) |
2085 | { |
2086 | ocfs2_node_map_clear_bit(osb, &osb->osb_recovering_orphan_dirs, slot); |
2087 | } |
2088 | |
2089 | /* |
2090 | * Orphan recovery. Each mounted node has its own orphan dir which we |
2091 | * must run during recovery. Our strategy here is to build a list of |
2092 | * the inodes in the orphan dir and iget/iput them. The VFS does |
2093 | * (most) of the rest of the work. |
2094 | * |
2095 | * Orphan recovery can happen at any time, not just at mount, so we have a |
2096 | * couple of extra considerations. |
2097 | * |
2098 | * - We grab as many inodes as we can under the orphan dir lock - |
2099 | * doing iget() outside the orphan dir lock risks getting a reference on |
2100 | * an invalid inode. |
2101 | * - We must be sure not to deadlock with other processes on the |
2102 | * system wanting to run delete_inode(). This can happen when they go |
2103 | * to lock the orphan dir and the orphan recovery process attempts to |
2104 | * iget() inside the orphan dir lock. This can be avoided by |
2105 | * advertising our state to ocfs2_delete_inode(). |
2106 | */ |
2107 | static int ocfs2_recover_orphans(struct ocfs2_super *osb, |
2108 | int slot) |
2109 | { |
2110 | int ret = 0; |
2111 | struct inode *inode = NULL; |
2112 | struct inode *iter; |
2113 | struct ocfs2_inode_info *oi; |
2114 | |
2115 | mlog(0, "Recover inodes from orphan dir in slot %d\n", slot); |
2116 | |
2117 | ocfs2_mark_recovering_orphan_dir(osb, slot); |
2118 | ret = ocfs2_queue_orphans(osb, slot, &inode); |
2119 | ocfs2_clear_recovering_orphan_dir(osb, slot); |
2120 | |
2121 | /* Error here should be noted, but we want to continue with as |
2122 | * many queued inodes as we've got. */ |
2123 | if (ret) |
2124 | mlog_errno(ret); |
2125 | |
2126 | while (inode) { |
2127 | oi = OCFS2_I(inode); |
2128 | mlog(0, "iput orphan %llu\n", (unsigned long long)oi->ip_blkno); |
2129 | |
2130 | iter = oi->ip_next_orphan; |
2131 | |
2132 | spin_lock(&oi->ip_lock); |
2133 | /* The remote delete code may have set these on the |
2134 | * assumption that the other node would wipe them |
2135 | * successfully. If they are still in the node's |
2136 | * orphan dir, we need to reset that state. */ |
2137 | oi->ip_flags &= ~(OCFS2_INODE_DELETED|OCFS2_INODE_SKIP_DELETE); |
2138 | |
2139 | /* Set the proper information to get us going into |
2140 | * ocfs2_delete_inode. */ |
2141 | oi->ip_flags |= OCFS2_INODE_MAYBE_ORPHANED; |
2142 | spin_unlock(&oi->ip_lock); |
2143 | |
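		/* Drop the reference taken in ocfs2_orphan_filldir(); for a
		 * genuinely orphaned inode this iput() is what drives it
		 * through ocfs2_delete_inode() and frees it. */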
2144 | iput(inode); |
2145 | |
2146 | inode = iter; |
2147 | } |
2148 | |
2149 | return ret; |
2150 | } |
2151 | |
2152 | static int __ocfs2_wait_on_mount(struct ocfs2_super *osb, int quota) |
2153 | { |
2154 | /* This check is good because ocfs2 will wait on our recovery |
2155 | * thread before changing it to something other than MOUNTED |
2156 | * or DISABLED. */ |
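	/* With quota set we additionally wait for VOLUME_MOUNTED_QUOTAS,
	 * i.e. until the volume has also finished enabling quotas. */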
2157 | wait_event(osb->osb_mount_event, |
2158 | (!quota && atomic_read(&osb->vol_state) == VOLUME_MOUNTED) || |
2159 | atomic_read(&osb->vol_state) == VOLUME_MOUNTED_QUOTAS || |
2160 | atomic_read(&osb->vol_state) == VOLUME_DISABLED); |
2161 | |
2162 | /* If there's an error on mount, then we may never get to the |
2163 | 	 * MOUNTED flag, but VOLUME_DISABLED is set right before |
2164 | 	 * dismount_volume(), so we can trust it. */ |
2165 | if (atomic_read(&osb->vol_state) == VOLUME_DISABLED) { |
2166 | mlog(0, "mount error, exiting!\n"); |
2167 | return -EBUSY; |
2168 | } |
2169 | |
2170 | return 0; |
2171 | } |
2172 | |
2173 | static int ocfs2_commit_thread(void *arg) |
2174 | { |
2175 | int status; |
2176 | struct ocfs2_super *osb = arg; |
2177 | struct ocfs2_journal *journal = osb->journal; |
2178 | |
2179 | /* we can trust j_num_trans here because _should_stop() is only set in |
2180 | * shutdown and nobody other than ourselves should be able to start |
2181 | 	 * transactions. Committing on shutdown might take a few iterations |
2182 | * as final transactions put deleted inodes on the list */ |
2183 | while (!(kthread_should_stop() && |
2184 | atomic_read(&journal->j_num_trans) == 0)) { |
2185 | |
2186 | wait_event_interruptible(osb->checkpoint_event, |
2187 | atomic_read(&journal->j_num_trans) |
2188 | || kthread_should_stop()); |
2189 | |
2190 | status = ocfs2_commit_cache(osb); |
2191 | if (status < 0) |
2192 | mlog_errno(status); |
2193 | |
2194 | 		if (kthread_should_stop() && atomic_read(&journal->j_num_trans)) { |
2195 | mlog(ML_KTHREAD, |
2196 | "commit_thread: %u transactions pending on " |
2197 | "shutdown\n", |
2198 | atomic_read(&journal->j_num_trans)); |
2199 | } |
2200 | } |
2201 | |
2202 | return 0; |
2203 | } |
2204 | |
2205 | /* Reads all the journal inodes without taking any cluster locks. Used |
2206 | * for hard readonly access to determine whether any journal requires |
2207 | * recovery. Also used to refresh the recovery generation numbers after |
2208 | * a journal has been recovered by another node. |
2209 | */ |
2210 | int ocfs2_check_journals_nolocks(struct ocfs2_super *osb) |
2211 | { |
2212 | int ret = 0; |
2213 | unsigned int slot; |
2214 | struct buffer_head *di_bh = NULL; |
2215 | struct ocfs2_dinode *di; |
2216 | int journal_dirty = 0; |
2217 | |
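	/* Walk every slot's journal inode, caching its recovery generation
	 * and noting whether any journal is still marked dirty. */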
2218 | 	for (slot = 0; slot < osb->max_slots; slot++) { |
2219 | ret = ocfs2_read_journal_inode(osb, slot, &di_bh, NULL); |
2220 | if (ret) { |
2221 | mlog_errno(ret); |
2222 | goto out; |
2223 | } |
2224 | |
2225 | di = (struct ocfs2_dinode *) di_bh->b_data; |
2226 | |
2227 | osb->slot_recovery_generations[slot] = |
2228 | ocfs2_get_recovery_generation(di); |
2229 | |
2230 | if (le32_to_cpu(di->id1.journal1.ij_flags) & |
2231 | OCFS2_JOURNAL_DIRTY_FL) |
2232 | journal_dirty = 1; |
2233 | |
2234 | brelse(di_bh); |
2235 | di_bh = NULL; |
2236 | } |
2237 | |
2238 | out: |
2239 | if (journal_dirty) |
2240 | ret = -EROFS; |
2241 | return ret; |
2242 | } |
2243 |