Source: linux/fs/super.c at commit b13e7eb172b6f08e5fc22da162bdde5fcde201b5 ("fbcon: Add 6x10 font", Maarten ter Huurne).
1 | /* |
2 | * linux/fs/super.c |
3 | * |
4 | * Copyright (C) 1991, 1992 Linus Torvalds |
5 | * |
6 | * super.c contains code to handle: - mount structures |
7 | * - super-block tables |
8 | * - filesystem drivers list |
9 | * - mount system call |
10 | * - umount system call |
11 | * - ustat system call |
12 | * |
13 | * GK 2/5/95 - Changed to support mounting the root fs via NFS |
14 | * |
15 | * Added kerneld support: Jacques Gelinas and Bjorn Ekwall |
16 | * Added change_root: Werner Almesberger & Hans Lermen, Feb '96 |
17 | * Added options to /proc/mounts: |
18 | * Torbjörn Lindh (torbjorn.lindh@gopta.se), April 14, 1996. |
19 | * Added devfs support: Richard Gooch <rgooch@atnf.csiro.au>, 13-JAN-1998 |
20 | * Heavily rewritten for 'one fs - one tree' dcache architecture. AV, Mar 2000 |
21 | */ |
22 | |
23 | #include <linux/module.h> |
24 | #include <linux/slab.h> |
25 | #include <linux/acct.h> |
26 | #include <linux/blkdev.h> |
27 | #include <linux/mount.h> |
28 | #include <linux/security.h> |
29 | #include <linux/writeback.h> /* for the emergency remount stuff */ |
30 | #include <linux/idr.h> |
31 | #include <linux/mutex.h> |
32 | #include <linux/backing-dev.h> |
33 | #include <linux/rculist_bl.h> |
34 | #include <linux/cleancache.h> |
35 | #include "internal.h" |
36 | |
37 | |
38 | LIST_HEAD(super_blocks); |
39 | DEFINE_SPINLOCK(sb_lock); |
40 | |
41 | /* |
42 | * One thing we have to be careful of with a per-sb shrinker is that we don't |
43 | * drop the last active reference to the superblock from within the shrinker. |
44 | * If that happens we could trigger unregistering the shrinker from within the |
45 | * shrinker path and that leads to deadlock on the shrinker_rwsem. Hence we |
46 | * take a passive reference to the superblock to prevent this from occurring. |
47 | */ |
48 | static int prune_super(struct shrinker *shrink, struct shrink_control *sc) |
49 | { |
50 | struct super_block *sb; |
51 | int fs_objects = 0; |
52 | int total_objects; |
53 | |
54 | sb = container_of(shrink, struct super_block, s_shrink); |
55 | |
56 | /* |
57 | * Deadlock avoidance. We may hold various FS locks, and we don't want |
58 | * to recurse into the FS that called us in clear_inode() and friends.. |
59 | */ |
60 | if (sc->nr_to_scan && !(sc->gfp_mask & __GFP_FS)) |
61 | return -1; |
62 | |
63 | if (!grab_super_passive(sb)) |
64 | return !sc->nr_to_scan ? 0 : -1; |
65 | |
66 | if (sb->s_op && sb->s_op->nr_cached_objects) |
67 | fs_objects = sb->s_op->nr_cached_objects(sb); |
68 | |
69 | total_objects = sb->s_nr_dentry_unused + |
70 | sb->s_nr_inodes_unused + fs_objects + 1; |
71 | |
72 | if (sc->nr_to_scan) { |
73 | int dentries; |
74 | int inodes; |
75 | |
76 | /* proportion the scan between the caches */ |
77 | dentries = (sc->nr_to_scan * sb->s_nr_dentry_unused) / |
78 | total_objects; |
79 | inodes = (sc->nr_to_scan * sb->s_nr_inodes_unused) / |
80 | total_objects; |
81 | if (fs_objects) |
82 | fs_objects = (sc->nr_to_scan * fs_objects) / |
83 | total_objects; |
84 | /* |
85 | * prune the dcache first as the icache is pinned by it, then |
86 | * prune the icache, followed by the filesystem specific caches |
87 | */ |
88 | prune_dcache_sb(sb, dentries); |
89 | prune_icache_sb(sb, inodes); |
90 | |
91 | if (fs_objects && sb->s_op->free_cached_objects) { |
92 | sb->s_op->free_cached_objects(sb, fs_objects); |
93 | fs_objects = sb->s_op->nr_cached_objects(sb); |
94 | } |
95 | total_objects = sb->s_nr_dentry_unused + |
96 | sb->s_nr_inodes_unused + fs_objects; |
97 | } |
98 | |
99 | total_objects = (total_objects / 100) * sysctl_vfs_cache_pressure; |
100 | drop_super(sb); |
101 | return total_objects; |
102 | } |
103 | |
104 | /** |
105 | * alloc_super - create new superblock |
106 | * @type: filesystem type superblock should belong to |
107 | * |
108 | * Allocates and initializes a new &struct super_block. alloc_super() |
109 | * returns a pointer to the new superblock or %NULL if the allocation failed. |
110 | */ |
111 | static struct super_block *alloc_super(struct file_system_type *type) |
112 | { |
113 | struct super_block *s = kzalloc(sizeof(struct super_block), GFP_USER); |
114 | static const struct super_operations default_op; |
115 | |
116 | if (s) { |
117 | if (security_sb_alloc(s)) { |
118 | kfree(s); |
119 | s = NULL; |
120 | goto out; |
121 | } |
122 | #ifdef CONFIG_SMP |
123 | s->s_files = alloc_percpu(struct list_head); |
124 | if (!s->s_files) { |
125 | security_sb_free(s); |
126 | kfree(s); |
127 | s = NULL; |
128 | goto out; |
129 | } else { |
130 | int i; |
131 | |
132 | for_each_possible_cpu(i) |
133 | INIT_LIST_HEAD(per_cpu_ptr(s->s_files, i)); |
134 | } |
135 | #else |
136 | INIT_LIST_HEAD(&s->s_files); |
137 | #endif |
138 | s->s_bdi = &default_backing_dev_info; |
139 | INIT_HLIST_NODE(&s->s_instances); |
140 | INIT_HLIST_BL_HEAD(&s->s_anon); |
141 | INIT_LIST_HEAD(&s->s_inodes); |
142 | INIT_LIST_HEAD(&s->s_dentry_lru); |
143 | INIT_LIST_HEAD(&s->s_inode_lru); |
144 | spin_lock_init(&s->s_inode_lru_lock); |
145 | INIT_LIST_HEAD(&s->s_mounts); |
146 | init_rwsem(&s->s_umount); |
147 | mutex_init(&s->s_lock); |
148 | lockdep_set_class(&s->s_umount, &type->s_umount_key); |
149 | /* |
150 | * The locking rules for s_lock are up to the |
151 | * filesystem. For example ext3fs has different |
152 | * lock ordering than usbfs: |
153 | */ |
154 | lockdep_set_class(&s->s_lock, &type->s_lock_key); |
155 | /* |
156 | * sget() can have s_umount recursion. |
157 | * |
158 | * When it cannot find a suitable sb, it allocates a new |
159 | * one (this one), and tries again to find a suitable old |
160 | * one. |
161 | * |
162 | * In case that succeeds, it will acquire the s_umount |
163 | * lock of the old one. Since these are clearly distinct |
164 | * locks, and this object isn't exposed yet, there's no |
165 | * risk of deadlocks. |
166 | * |
167 | * Annotate this by putting this lock in a different |
168 | * subclass. |
169 | */ |
170 | down_write_nested(&s->s_umount, SINGLE_DEPTH_NESTING); |
171 | s->s_count = 1; |
172 | atomic_set(&s->s_active, 1); |
173 | mutex_init(&s->s_vfs_rename_mutex); |
174 | lockdep_set_class(&s->s_vfs_rename_mutex, &type->s_vfs_rename_key); |
175 | mutex_init(&s->s_dquot.dqio_mutex); |
176 | mutex_init(&s->s_dquot.dqonoff_mutex); |
177 | init_rwsem(&s->s_dquot.dqptr_sem); |
178 | init_waitqueue_head(&s->s_wait_unfrozen); |
179 | s->s_maxbytes = MAX_NON_LFS; |
180 | s->s_op = &default_op; |
181 | s->s_time_gran = 1000000000; |
182 | s->cleancache_poolid = -1; |
183 | |
184 | s->s_shrink.seeks = DEFAULT_SEEKS; |
185 | s->s_shrink.shrink = prune_super; |
186 | s->s_shrink.batch = 1024; |
187 | } |
188 | out: |
189 | return s; |
190 | } |
191 | |
192 | /** |
193 | * destroy_super - frees a superblock |
194 | * @s: superblock to free |
195 | * |
196 | * Frees a superblock. |
197 | */ |
198 | static inline void destroy_super(struct super_block *s) |
199 | { |
200 | #ifdef CONFIG_SMP |
201 | free_percpu(s->s_files); |
202 | #endif |
203 | security_sb_free(s); |
204 | WARN_ON(!list_empty(&s->s_mounts)); |
205 | kfree(s->s_subtype); |
206 | kfree(s->s_options); |
207 | kfree(s); |
208 | } |
209 | |
210 | /* Superblock refcounting */ |
211 | |
212 | /* |
213 | * Drop a superblock's refcount. The caller must hold sb_lock. |
214 | */ |
215 | static void __put_super(struct super_block *sb) |
216 | { |
217 | if (!--sb->s_count) { |
218 | list_del_init(&sb->s_list); |
219 | destroy_super(sb); |
220 | } |
221 | } |
222 | |
223 | /** |
224 | * put_super - drop a temporary reference to superblock |
225 | * @sb: superblock in question |
226 | * |
227 | * Drops a temporary reference; frees the superblock if there are no |
228 | * references left. |
229 | */ |
230 | static void put_super(struct super_block *sb) |
231 | { |
232 | spin_lock(&sb_lock); |
233 | __put_super(sb); |
234 | spin_unlock(&sb_lock); |
235 | } |
236 | |
237 | |
238 | /** |
239 | * deactivate_locked_super - drop an active reference to superblock |
240 | * @s: superblock to deactivate |
241 | * |
242 | * Drops an active reference to the superblock, converting it into a temporary |
243 | * one if there are no other active references left. In that case we |
244 | * tell fs driver to shut it down and drop the temporary reference we |
245 | * had just acquired. |
246 | * |
247 | * Caller holds exclusive lock on superblock; that lock is released. |
248 | */ |
249 | void deactivate_locked_super(struct super_block *s) |
250 | { |
251 | struct file_system_type *fs = s->s_type; |
252 | if (atomic_dec_and_test(&s->s_active)) { |
253 | cleancache_flush_fs(s); |
254 | fs->kill_sb(s); |
255 | |
256 | /* caches are now gone, we can safely kill the shrinker now */ |
257 | unregister_shrinker(&s->s_shrink); |
258 | |
259 | /* |
260 | * We need to call rcu_barrier so all the delayed rcu free |
261 | * inodes are flushed before we release the fs module. |
262 | */ |
263 | rcu_barrier(); |
264 | put_filesystem(fs); |
265 | put_super(s); |
266 | } else { |
267 | up_write(&s->s_umount); |
268 | } |
269 | } |
270 | |
271 | EXPORT_SYMBOL(deactivate_locked_super); |
272 | |
273 | /** |
274 | * deactivate_super - drop an active reference to superblock |
275 | * @s: superblock to deactivate |
276 | * |
277 | * Variant of deactivate_locked_super(), except that superblock is *not* |
278 | * locked by caller. If we are going to drop the final active reference, |
279 | * lock will be acquired prior to that. |
280 | */ |
281 | void deactivate_super(struct super_block *s) |
282 | { |
283 | if (!atomic_add_unless(&s->s_active, -1, 1)) { |
284 | down_write(&s->s_umount); |
285 | deactivate_locked_super(s); |
286 | } |
287 | } |
288 | |
289 | EXPORT_SYMBOL(deactivate_super); |
290 | |
291 | /** |
292 | * grab_super - acquire an active reference |
293 | * @s: reference we are trying to make active |
294 | * |
295 | * Tries to acquire an active reference. grab_super() is used when we |
296 | * had just found a superblock in super_blocks or fs_type->fs_supers |
297 | * and want to turn it into a full-blown active reference. grab_super() |
298 | * is called with sb_lock held and drops it. Returns 1 on |
299 | * success, 0 on failure (the superblock was already dead or |
300 | * dying when grab_super() was called). |
301 | */ |
302 | static int grab_super(struct super_block *s) __releases(sb_lock) |
303 | { |
304 | if (atomic_inc_not_zero(&s->s_active)) { |
305 | spin_unlock(&sb_lock); |
306 | return 1; |
307 | } |
308 | /* it's going away */ |
309 | s->s_count++; |
310 | spin_unlock(&sb_lock); |
311 | /* wait for it to die */ |
312 | down_write(&s->s_umount); |
313 | up_write(&s->s_umount); |
314 | put_super(s); |
315 | return 0; |
316 | } |
317 | |
318 | /* |
319 | * grab_super_passive - acquire a passive reference |
320 | * @s: reference we are trying to grab |
321 | * |
322 | * Tries to acquire a passive reference. This is used in places where we |
323 | * cannot take an active reference but we need to ensure that the |
324 | * superblock does not go away while we are working on it. It returns |
325 | * false if a reference was not gained, and returns true with the s_umount |
326 | * lock held in read mode if a reference is gained. On successful return, |
327 | * the caller must drop the s_umount lock and the passive reference when |
328 | * done. |
329 | */ |
330 | bool grab_super_passive(struct super_block *sb) |
331 | { |
332 | spin_lock(&sb_lock); |
333 | if (hlist_unhashed(&sb->s_instances)) { |
334 | spin_unlock(&sb_lock); |
335 | return false; |
336 | } |
337 | |
338 | sb->s_count++; |
339 | spin_unlock(&sb_lock); |
340 | |
341 | if (down_read_trylock(&sb->s_umount)) { |
342 | if (sb->s_root && (sb->s_flags & MS_BORN)) |
343 | return true; |
344 | up_read(&sb->s_umount); |
345 | } |
346 | |
347 | put_super(sb); |
348 | return false; |
349 | } |
350 | |
351 | /* |
352 | * Superblock locking. We really ought to get rid of these two. |
353 | */ |
354 | void lock_super(struct super_block * sb) |
355 | { |
356 | mutex_lock(&sb->s_lock); |
357 | } |
358 | |
359 | void unlock_super(struct super_block * sb) |
360 | { |
361 | mutex_unlock(&sb->s_lock); |
362 | } |
363 | |
364 | EXPORT_SYMBOL(lock_super); |
365 | EXPORT_SYMBOL(unlock_super); |
366 | |
367 | /** |
368 | * generic_shutdown_super - common helper for ->kill_sb() |
369 | * @sb: superblock to kill |
370 | * |
371 | * generic_shutdown_super() does all fs-independent work on superblock |
372 | * shutdown. Typical ->kill_sb() should pick all fs-specific objects |
373 | * that need destruction out of superblock, call generic_shutdown_super() |
374 | * and release aforementioned objects. Note: dentries and inodes _are_ |
375 | * taken care of and do not need specific handling. |
376 | * |
377 | * Upon calling this function, the filesystem may no longer alter or |
378 | * rearrange the set of dentries belonging to this super_block, nor may it |
379 | * change the attachments of dentries to inodes. |
380 | */ |
381 | void generic_shutdown_super(struct super_block *sb) |
382 | { |
383 | const struct super_operations *sop = sb->s_op; |
384 | |
385 | if (sb->s_root) { |
386 | shrink_dcache_for_umount(sb); |
387 | sync_filesystem(sb); |
388 | sb->s_flags &= ~MS_ACTIVE; |
389 | |
390 | fsnotify_unmount_inodes(&sb->s_inodes); |
391 | |
392 | evict_inodes(sb); |
393 | |
394 | if (sop->put_super) |
395 | sop->put_super(sb); |
396 | |
397 | if (!list_empty(&sb->s_inodes)) { |
398 | printk("VFS: Busy inodes after unmount of %s. " |
399 | "Self-destruct in 5 seconds. Have a nice day...\n", |
400 | sb->s_id); |
401 | } |
402 | } |
403 | spin_lock(&sb_lock); |
404 | /* re-init s_instances so hlist_unhashed() reports this sb as dead */ |
405 | hlist_del_init(&sb->s_instances); |
406 | spin_unlock(&sb_lock); |
407 | up_write(&sb->s_umount); |
408 | } |
409 | |
410 | EXPORT_SYMBOL(generic_shutdown_super); |
411 | |
412 | /** |
413 | * sget - find or create a superblock |
414 | * @type: filesystem type superblock should belong to |
415 | * @test: comparison callback |
416 | * @set: setup callback |
417 | * @data: argument to each of them |
418 | */ |
419 | struct super_block *sget(struct file_system_type *type, |
420 | int (*test)(struct super_block *,void *), |
421 | int (*set)(struct super_block *,void *), |
422 | void *data) |
423 | { |
424 | struct super_block *s = NULL; |
425 | struct hlist_node *node; |
426 | struct super_block *old; |
427 | int err; |
428 | |
429 | retry: |
430 | spin_lock(&sb_lock); |
431 | if (test) { |
432 | hlist_for_each_entry(old, node, &type->fs_supers, s_instances) { |
433 | if (!test(old, data)) |
434 | continue; |
435 | if (!grab_super(old)) |
436 | goto retry; |
437 | if (s) { |
438 | up_write(&s->s_umount); |
439 | destroy_super(s); |
440 | s = NULL; |
441 | } |
442 | down_write(&old->s_umount); |
443 | if (unlikely(!(old->s_flags & MS_BORN))) { |
444 | deactivate_locked_super(old); |
445 | goto retry; |
446 | } |
447 | return old; |
448 | } |
449 | } |
450 | if (!s) { |
451 | spin_unlock(&sb_lock); |
452 | s = alloc_super(type); |
453 | if (!s) |
454 | return ERR_PTR(-ENOMEM); |
455 | goto retry; |
456 | } |
457 | |
458 | err = set(s, data); |
459 | if (err) { |
460 | spin_unlock(&sb_lock); |
461 | up_write(&s->s_umount); |
462 | destroy_super(s); |
463 | return ERR_PTR(err); |
464 | } |
465 | s->s_type = type; |
466 | strlcpy(s->s_id, type->name, sizeof(s->s_id)); |
467 | list_add_tail(&s->s_list, &super_blocks); |
468 | hlist_add_head(&s->s_instances, &type->fs_supers); |
469 | spin_unlock(&sb_lock); |
470 | get_filesystem(type); |
471 | register_shrinker(&s->s_shrink); |
472 | return s; |
473 | } |
474 | |
475 | EXPORT_SYMBOL(sget); |
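/*
 * Illustrative sketch, not part of this file: a filesystem that wants a
 * "one superblock per key" policy can call sget() directly from its
 * ->mount() callback, much as mount_ns() below does with ns_test_super()
 * and ns_set_super(). The myfs_* names here are hypothetical:
 *
 *	static int myfs_test_super(struct super_block *sb, void *key)
 *	{
 *		return sb->s_fs_info == key;
 *	}
 *
 *	static int myfs_set_super(struct super_block *sb, void *key)
 *	{
 *		sb->s_fs_info = key;
 *		return set_anon_super(sb, NULL);
 *	}
 *
 *	sb = sget(fs_type, myfs_test_super, myfs_set_super, key);
 *	if (IS_ERR(sb))
 *		return ERR_CAST(sb);
 *
 * A freshly allocated superblock comes back with s_umount held for writing
 * and sb->s_root == NULL; the caller then fills it in and sets MS_ACTIVE,
 * as mount_ns() below does.
 */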
476 | |
477 | void drop_super(struct super_block *sb) |
478 | { |
479 | up_read(&sb->s_umount); |
480 | put_super(sb); |
481 | } |
482 | |
483 | EXPORT_SYMBOL(drop_super); |
484 | |
485 | /** |
486 | * sync_supers - helper for periodic superblock writeback |
487 | * |
488 | * Call the write_super method if present on all dirty superblocks in |
489 | * the system. This is for the periodic writeback used by most older |
490 | * filesystems. For data integrity superblock writeback use |
491 | * sync_filesystems() instead. |
492 | * |
493 | * Note: check the dirty flag before waiting, so we don't |
494 | * hold up the sync while mounting a device. (The newly |
495 | * mounted device won't need syncing.) |
496 | */ |
497 | void sync_supers(void) |
498 | { |
499 | struct super_block *sb, *p = NULL; |
500 | |
501 | spin_lock(&sb_lock); |
502 | list_for_each_entry(sb, &super_blocks, s_list) { |
503 | if (hlist_unhashed(&sb->s_instances)) |
504 | continue; |
505 | if (sb->s_op->write_super && sb->s_dirt) { |
506 | sb->s_count++; |
507 | spin_unlock(&sb_lock); |
508 | |
509 | down_read(&sb->s_umount); |
510 | if (sb->s_root && sb->s_dirt && (sb->s_flags & MS_BORN)) |
511 | sb->s_op->write_super(sb); |
512 | up_read(&sb->s_umount); |
513 | |
514 | spin_lock(&sb_lock); |
515 | if (p) |
516 | __put_super(p); |
517 | p = sb; |
518 | } |
519 | } |
520 | if (p) |
521 | __put_super(p); |
522 | spin_unlock(&sb_lock); |
523 | } |
524 | |
525 | /** |
526 | * iterate_supers - call function for all active superblocks |
527 | * @f: function to call |
528 | * @arg: argument to pass to it |
529 | * |
530 | * Scans the superblock list and calls given function, passing it |
531 | * locked superblock and given argument. |
532 | */ |
533 | void iterate_supers(void (*f)(struct super_block *, void *), void *arg) |
534 | { |
535 | struct super_block *sb, *p = NULL; |
536 | |
537 | spin_lock(&sb_lock); |
538 | list_for_each_entry(sb, &super_blocks, s_list) { |
539 | if (hlist_unhashed(&sb->s_instances)) |
540 | continue; |
541 | sb->s_count++; |
542 | spin_unlock(&sb_lock); |
543 | |
544 | down_read(&sb->s_umount); |
545 | if (sb->s_root && (sb->s_flags & MS_BORN)) |
546 | f(sb, arg); |
547 | up_read(&sb->s_umount); |
548 | |
549 | spin_lock(&sb_lock); |
550 | if (p) |
551 | __put_super(p); |
552 | p = sb; |
553 | } |
554 | if (p) |
555 | __put_super(p); |
556 | spin_unlock(&sb_lock); |
557 | } |
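/*
 * Illustrative sketch, not part of this file: a typical caller hands
 * iterate_supers() a small callback and lets it do the locking and the
 * reference counting. The callback name is hypothetical:
 *
 *	static void count_dirty_sb(struct super_block *sb, void *arg)
 *	{
 *		if (sb->s_dirt)
 *			(*(unsigned long *)arg)++;
 *	}
 *
 *	unsigned long n = 0;
 *	iterate_supers(count_dirty_sb, &n);
 *
 * The callback runs with s_umount held for reading and the superblock
 * pinned, so it must not try to unmount or re-lock the superblock itself.
 */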
558 | |
559 | /** |
560 | * iterate_supers_type - call function for superblocks of given type |
561 | * @type: fs type |
562 | * @f: function to call |
563 | * @arg: argument to pass to it |
564 | * |
565 | * Scans the superblock list and calls given function, passing it |
566 | * locked superblock and given argument. |
567 | */ |
568 | void iterate_supers_type(struct file_system_type *type, |
569 | void (*f)(struct super_block *, void *), void *arg) |
570 | { |
571 | struct super_block *sb, *p = NULL; |
572 | struct hlist_node *node; |
573 | |
574 | spin_lock(&sb_lock); |
575 | hlist_for_each_entry(sb, node, &type->fs_supers, s_instances) { |
576 | sb->s_count++; |
577 | spin_unlock(&sb_lock); |
578 | |
579 | down_read(&sb->s_umount); |
580 | if (sb->s_root && (sb->s_flags & MS_BORN)) |
581 | f(sb, arg); |
582 | up_read(&sb->s_umount); |
583 | |
584 | spin_lock(&sb_lock); |
585 | if (p) |
586 | __put_super(p); |
587 | p = sb; |
588 | } |
589 | if (p) |
590 | __put_super(p); |
591 | spin_unlock(&sb_lock); |
592 | } |
593 | |
594 | EXPORT_SYMBOL(iterate_supers_type); |
595 | |
596 | /** |
597 | * get_super - get the superblock of a device |
598 | * @bdev: device to get the superblock for |
599 | * |
600 | * Scans the superblock list and finds the superblock of the file system |
601 | * mounted on the device given. %NULL is returned if no match is found. |
602 | */ |
603 | |
604 | struct super_block *get_super(struct block_device *bdev) |
605 | { |
606 | struct super_block *sb; |
607 | |
608 | if (!bdev) |
609 | return NULL; |
610 | |
611 | spin_lock(&sb_lock); |
612 | rescan: |
613 | list_for_each_entry(sb, &super_blocks, s_list) { |
614 | if (hlist_unhashed(&sb->s_instances)) |
615 | continue; |
616 | if (sb->s_bdev == bdev) { |
617 | sb->s_count++; |
618 | spin_unlock(&sb_lock); |
619 | down_read(&sb->s_umount); |
620 | /* still alive? */ |
621 | if (sb->s_root && (sb->s_flags & MS_BORN)) |
622 | return sb; |
623 | up_read(&sb->s_umount); |
624 | /* nope, got unmounted */ |
625 | spin_lock(&sb_lock); |
626 | __put_super(sb); |
627 | goto rescan; |
628 | } |
629 | } |
630 | spin_unlock(&sb_lock); |
631 | return NULL; |
632 | } |
633 | |
634 | EXPORT_SYMBOL(get_super); |
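/*
 * Illustrative sketch, not part of this file: callers such as fsync_bdev()
 * pair get_super() with drop_super() (defined above), which releases
 * s_umount and the temporary reference:
 *
 *	struct super_block *sb = get_super(bdev);
 *	if (sb) {
 *		sync_filesystem(sb);
 *		drop_super(sb);
 *	}
 */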
635 | |
636 | /** |
637 | * get_super_thawed - get thawed superblock of a device |
638 | * @bdev: device to get the superblock for |
639 | * |
640 | * Scans the superblock list and finds the superblock of the file system |
641 | * mounted on the device. The superblock is returned once it is thawed |
642 | * (or immediately if it was not frozen). %NULL is returned if no match |
643 | * is found. |
644 | */ |
645 | struct super_block *get_super_thawed(struct block_device *bdev) |
646 | { |
647 | while (1) { |
648 | struct super_block *s = get_super(bdev); |
649 | if (!s || s->s_frozen == SB_UNFROZEN) |
650 | return s; |
651 | up_read(&s->s_umount); |
652 | vfs_check_frozen(s, SB_FREEZE_WRITE); |
653 | put_super(s); |
654 | } |
655 | } |
656 | EXPORT_SYMBOL(get_super_thawed); |
657 | |
658 | /** |
659 | * get_active_super - get an active reference to the superblock of a device |
660 | * @bdev: device to get the superblock for |
661 | * |
662 | * Scans the superblock list and finds the superblock of the file system |
663 | * mounted on the device given. Returns the superblock with an active |
664 | * reference or %NULL if none was found. |
665 | */ |
666 | struct super_block *get_active_super(struct block_device *bdev) |
667 | { |
668 | struct super_block *sb; |
669 | |
670 | if (!bdev) |
671 | return NULL; |
672 | |
673 | restart: |
674 | spin_lock(&sb_lock); |
675 | list_for_each_entry(sb, &super_blocks, s_list) { |
676 | if (hlist_unhashed(&sb->s_instances)) |
677 | continue; |
678 | if (sb->s_bdev == bdev) { |
679 | if (grab_super(sb)) /* drops sb_lock */ |
680 | return sb; |
681 | else |
682 | goto restart; |
683 | } |
684 | } |
685 | spin_unlock(&sb_lock); |
686 | return NULL; |
687 | } |
688 | |
689 | struct super_block *user_get_super(dev_t dev) |
690 | { |
691 | struct super_block *sb; |
692 | |
693 | spin_lock(&sb_lock); |
694 | rescan: |
695 | list_for_each_entry(sb, &super_blocks, s_list) { |
696 | if (hlist_unhashed(&sb->s_instances)) |
697 | continue; |
698 | if (sb->s_dev == dev) { |
699 | sb->s_count++; |
700 | spin_unlock(&sb_lock); |
701 | down_read(&sb->s_umount); |
702 | /* still alive? */ |
703 | if (sb->s_root && (sb->s_flags & MS_BORN)) |
704 | return sb; |
705 | up_read(&sb->s_umount); |
706 | /* nope, got unmounted */ |
707 | spin_lock(&sb_lock); |
708 | __put_super(sb); |
709 | goto rescan; |
710 | } |
711 | } |
712 | spin_unlock(&sb_lock); |
713 | return NULL; |
714 | } |
715 | |
716 | /** |
717 | * do_remount_sb - asks filesystem to change mount options. |
718 | * @sb: superblock in question |
719 | * @flags: numeric part of options |
720 | * @data: the rest of options |
721 | * @force: whether or not to force the change |
722 | * |
723 | * Alters the mount options of a mounted file system. |
724 | */ |
725 | int do_remount_sb(struct super_block *sb, int flags, void *data, int force) |
726 | { |
727 | int retval; |
728 | int remount_ro; |
729 | |
730 | if (sb->s_frozen != SB_UNFROZEN) |
731 | return -EBUSY; |
732 | |
733 | #ifdef CONFIG_BLOCK |
734 | if (!(flags & MS_RDONLY) && bdev_read_only(sb->s_bdev)) |
735 | return -EACCES; |
736 | #endif |
737 | |
738 | if (flags & MS_RDONLY) |
739 | acct_auto_close(sb); |
740 | shrink_dcache_sb(sb); |
741 | sync_filesystem(sb); |
742 | |
743 | remount_ro = (flags & MS_RDONLY) && !(sb->s_flags & MS_RDONLY); |
744 | |
745 | /* If we are remounting RDONLY and current sb is read/write, |
746 | make sure there are no rw files opened */ |
747 | if (remount_ro) { |
748 | if (force) { |
749 | mark_files_ro(sb); |
750 | } else { |
751 | retval = sb_prepare_remount_readonly(sb); |
752 | if (retval) |
753 | return retval; |
754 | } |
755 | } |
756 | |
757 | if (sb->s_op->remount_fs) { |
758 | retval = sb->s_op->remount_fs(sb, &flags, data); |
759 | if (retval) { |
760 | if (!force) |
761 | goto cancel_readonly; |
762 | /* If forced remount, go ahead despite any errors */ |
763 | WARN(1, "forced remount of a %s fs returned %i\n", |
764 | sb->s_type->name, retval); |
765 | } |
766 | } |
767 | sb->s_flags = (sb->s_flags & ~MS_RMT_MASK) | (flags & MS_RMT_MASK); |
768 | /* Needs to be ordered wrt mnt_is_readonly() */ |
769 | smp_wmb(); |
770 | sb->s_readonly_remount = 0; |
771 | |
772 | /* |
773 | * Some filesystems modify their metadata via some other path than the |
774 | * bdev buffer cache (eg. use a private mapping, or directories in |
775 | * pagecache, etc). Also file data modifications go via their own |
776 | * mappings. So if we try to mount readonly then copy the filesystem |
777 | * from bdev, we could get stale data, so invalidate it to give a best |
778 | * effort at coherency. |
779 | */ |
780 | if (remount_ro && sb->s_bdev) |
781 | invalidate_bdev(sb->s_bdev); |
782 | return 0; |
783 | |
784 | cancel_readonly: |
785 | sb->s_readonly_remount = 0; |
786 | return retval; |
787 | } |
788 | |
789 | static void do_emergency_remount(struct work_struct *work) |
790 | { |
791 | struct super_block *sb, *p = NULL; |
792 | |
793 | spin_lock(&sb_lock); |
794 | list_for_each_entry(sb, &super_blocks, s_list) { |
795 | if (hlist_unhashed(&sb->s_instances)) |
796 | continue; |
797 | sb->s_count++; |
798 | spin_unlock(&sb_lock); |
799 | down_write(&sb->s_umount); |
800 | if (sb->s_root && sb->s_bdev && (sb->s_flags & MS_BORN) && |
801 | !(sb->s_flags & MS_RDONLY)) { |
802 | /* |
803 | * What lock protects sb->s_flags?? |
804 | */ |
805 | do_remount_sb(sb, MS_RDONLY, NULL, 1); |
806 | } |
807 | up_write(&sb->s_umount); |
808 | spin_lock(&sb_lock); |
809 | if (p) |
810 | __put_super(p); |
811 | p = sb; |
812 | } |
813 | if (p) |
814 | __put_super(p); |
815 | spin_unlock(&sb_lock); |
816 | kfree(work); |
817 | printk("Emergency Remount complete\n"); |
818 | } |
819 | |
820 | void emergency_remount(void) |
821 | { |
822 | struct work_struct *work; |
823 | |
824 | work = kmalloc(sizeof(*work), GFP_ATOMIC); |
825 | if (work) { |
826 | INIT_WORK(work, do_emergency_remount); |
827 | schedule_work(work); |
828 | } |
829 | } |
830 | |
831 | /* |
832 | * Unnamed block devices are dummy devices used by virtual |
833 | * filesystems which don't use real block-devices. -- jrs |
834 | */ |
835 | |
836 | static DEFINE_IDA(unnamed_dev_ida); |
837 | static DEFINE_SPINLOCK(unnamed_dev_lock);/* protects the above */ |
838 | static int unnamed_dev_start = 0; /* don't bother trying below it */ |
839 | |
840 | int get_anon_bdev(dev_t *p) |
841 | { |
842 | int dev; |
843 | int error; |
844 | |
845 | retry: |
846 | if (ida_pre_get(&unnamed_dev_ida, GFP_ATOMIC) == 0) |
847 | return -ENOMEM; |
848 | spin_lock(&unnamed_dev_lock); |
849 | error = ida_get_new_above(&unnamed_dev_ida, unnamed_dev_start, &dev); |
850 | if (!error) |
851 | unnamed_dev_start = dev + 1; |
852 | spin_unlock(&unnamed_dev_lock); |
853 | if (error == -EAGAIN) |
854 | /* We raced and lost with another CPU. */ |
855 | goto retry; |
856 | else if (error) |
857 | return -EAGAIN; |
858 | |
859 | if ((dev & MAX_ID_MASK) == (1 << MINORBITS)) { |
860 | spin_lock(&unnamed_dev_lock); |
861 | ida_remove(&unnamed_dev_ida, dev); |
862 | if (unnamed_dev_start > dev) |
863 | unnamed_dev_start = dev; |
864 | spin_unlock(&unnamed_dev_lock); |
865 | return -EMFILE; |
866 | } |
867 | *p = MKDEV(0, dev & MINORMASK); |
868 | return 0; |
869 | } |
870 | EXPORT_SYMBOL(get_anon_bdev); |
871 | |
872 | void free_anon_bdev(dev_t dev) |
873 | { |
874 | int slot = MINOR(dev); |
875 | spin_lock(&unnamed_dev_lock); |
876 | ida_remove(&unnamed_dev_ida, slot); |
877 | if (slot < unnamed_dev_start) |
878 | unnamed_dev_start = slot; |
879 | spin_unlock(&unnamed_dev_lock); |
880 | } |
881 | EXPORT_SYMBOL(free_anon_bdev); |
882 | |
883 | int set_anon_super(struct super_block *s, void *data) |
884 | { |
885 | int error = get_anon_bdev(&s->s_dev); |
886 | if (!error) |
887 | s->s_bdi = &noop_backing_dev_info; |
888 | return error; |
889 | } |
890 | |
891 | EXPORT_SYMBOL(set_anon_super); |
892 | |
893 | void kill_anon_super(struct super_block *sb) |
894 | { |
895 | dev_t dev = sb->s_dev; |
896 | generic_shutdown_super(sb); |
897 | free_anon_bdev(dev); |
898 | } |
899 | |
900 | EXPORT_SYMBOL(kill_anon_super); |
901 | |
902 | void kill_litter_super(struct super_block *sb) |
903 | { |
904 | if (sb->s_root) |
905 | d_genocide(sb->s_root); |
906 | kill_anon_super(sb); |
907 | } |
908 | |
909 | EXPORT_SYMBOL(kill_litter_super); |
910 | |
911 | static int ns_test_super(struct super_block *sb, void *data) |
912 | { |
913 | return sb->s_fs_info == data; |
914 | } |
915 | |
916 | static int ns_set_super(struct super_block *sb, void *data) |
917 | { |
918 | sb->s_fs_info = data; |
919 | return set_anon_super(sb, NULL); |
920 | } |
921 | |
922 | struct dentry *mount_ns(struct file_system_type *fs_type, int flags, |
923 | void *data, int (*fill_super)(struct super_block *, void *, int)) |
924 | { |
925 | struct super_block *sb; |
926 | |
927 | sb = sget(fs_type, ns_test_super, ns_set_super, data); |
928 | if (IS_ERR(sb)) |
929 | return ERR_CAST(sb); |
930 | |
931 | if (!sb->s_root) { |
932 | int err; |
933 | sb->s_flags = flags; |
934 | err = fill_super(sb, data, flags & MS_SILENT ? 1 : 0); |
935 | if (err) { |
936 | deactivate_locked_super(sb); |
937 | return ERR_PTR(err); |
938 | } |
939 | |
940 | sb->s_flags |= MS_ACTIVE; |
941 | } |
942 | |
943 | return dget(sb->s_root); |
944 | } |
945 | |
946 | EXPORT_SYMBOL(mount_ns); |
947 | |
948 | #ifdef CONFIG_BLOCK |
949 | static int set_bdev_super(struct super_block *s, void *data) |
950 | { |
951 | s->s_bdev = data; |
952 | s->s_dev = s->s_bdev->bd_dev; |
953 | |
954 | /* |
955 | * We set the bdi here to the queue backing, file systems can |
956 | * overwrite this in ->fill_super() |
957 | */ |
958 | s->s_bdi = &bdev_get_queue(s->s_bdev)->backing_dev_info; |
959 | return 0; |
960 | } |
961 | |
962 | static int test_bdev_super(struct super_block *s, void *data) |
963 | { |
964 | return (void *)s->s_bdev == data; |
965 | } |
966 | |
967 | struct dentry *mount_bdev(struct file_system_type *fs_type, |
968 | int flags, const char *dev_name, void *data, |
969 | int (*fill_super)(struct super_block *, void *, int)) |
970 | { |
971 | struct block_device *bdev; |
972 | struct super_block *s; |
973 | fmode_t mode = FMODE_READ | FMODE_EXCL; |
974 | int error = 0; |
975 | |
976 | if (!(flags & MS_RDONLY)) |
977 | mode |= FMODE_WRITE; |
978 | |
979 | bdev = blkdev_get_by_path(dev_name, mode, fs_type); |
980 | if (IS_ERR(bdev)) |
981 | return ERR_CAST(bdev); |
982 | |
983 | /* |
984 | * once the super is inserted into the list by sget, s_umount |
985 | * will protect the lockfs code from trying to start a snapshot |
986 | * while we are mounting |
987 | */ |
988 | mutex_lock(&bdev->bd_fsfreeze_mutex); |
989 | if (bdev->bd_fsfreeze_count > 0) { |
990 | mutex_unlock(&bdev->bd_fsfreeze_mutex); |
991 | error = -EBUSY; |
992 | goto error_bdev; |
993 | } |
994 | s = sget(fs_type, test_bdev_super, set_bdev_super, bdev); |
995 | mutex_unlock(&bdev->bd_fsfreeze_mutex); |
996 | if (IS_ERR(s)) |
997 | goto error_s; |
998 | |
999 | if (s->s_root) { |
1000 | if ((flags ^ s->s_flags) & MS_RDONLY) { |
1001 | deactivate_locked_super(s); |
1002 | error = -EBUSY; |
1003 | goto error_bdev; |
1004 | } |
1005 | |
1006 | /* |
1007 | * s_umount nests inside bd_mutex during |
1008 | * __invalidate_device(). blkdev_put() acquires |
1009 | * bd_mutex and can't be called under s_umount. Drop |
1010 | * s_umount temporarily. This is safe as we're |
1011 | * holding an active reference. |
1012 | */ |
1013 | up_write(&s->s_umount); |
1014 | blkdev_put(bdev, mode); |
1015 | down_write(&s->s_umount); |
1016 | } else { |
1017 | char b[BDEVNAME_SIZE]; |
1018 | |
1019 | s->s_flags = flags | MS_NOSEC; |
1020 | s->s_mode = mode; |
1021 | strlcpy(s->s_id, bdevname(bdev, b), sizeof(s->s_id)); |
1022 | sb_set_blocksize(s, block_size(bdev)); |
1023 | error = fill_super(s, data, flags & MS_SILENT ? 1 : 0); |
1024 | if (error) { |
1025 | deactivate_locked_super(s); |
1026 | goto error; |
1027 | } |
1028 | |
1029 | s->s_flags |= MS_ACTIVE; |
1030 | bdev->bd_super = s; |
1031 | } |
1032 | |
1033 | return dget(s->s_root); |
1034 | |
1035 | error_s: |
1036 | error = PTR_ERR(s); |
1037 | error_bdev: |
1038 | blkdev_put(bdev, mode); |
1039 | error: |
1040 | return ERR_PTR(error); |
1041 | } |
1042 | EXPORT_SYMBOL(mount_bdev); |
1043 | |
1044 | void kill_block_super(struct super_block *sb) |
1045 | { |
1046 | struct block_device *bdev = sb->s_bdev; |
1047 | fmode_t mode = sb->s_mode; |
1048 | |
1049 | bdev->bd_super = NULL; |
1050 | generic_shutdown_super(sb); |
1051 | sync_blockdev(bdev); |
1052 | WARN_ON_ONCE(!(mode & FMODE_EXCL)); |
1053 | blkdev_put(bdev, mode | FMODE_EXCL); |
1054 | } |
1055 | |
1056 | EXPORT_SYMBOL(kill_block_super); |
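/*
 * Illustrative sketch, not part of this file: a block-device based
 * filesystem of this era typically wires mount_bdev() and kill_block_super()
 * into its file_system_type. "myfs" and the myfs_* helpers are hypothetical:
 *
 *	static struct dentry *myfs_mount(struct file_system_type *fs_type,
 *		int flags, const char *dev_name, void *data)
 *	{
 *		return mount_bdev(fs_type, flags, dev_name, data,
 *				  myfs_fill_super);
 *	}
 *
 *	static struct file_system_type myfs_fs_type = {
 *		.owner		= THIS_MODULE,
 *		.name		= "myfs",
 *		.mount		= myfs_mount,
 *		.kill_sb	= kill_block_super,
 *		.fs_flags	= FS_REQUIRES_DEV,
 *	};
 *
 * register_filesystem(&myfs_fs_type) in module init makes it mountable;
 * myfs_fill_super(sb, data, silent) reads the on-disk superblock and sets
 * sb->s_root.
 */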
1057 | #endif |
1058 | |
1059 | struct dentry *mount_nodev(struct file_system_type *fs_type, |
1060 | int flags, void *data, |
1061 | int (*fill_super)(struct super_block *, void *, int)) |
1062 | { |
1063 | int error; |
1064 | struct super_block *s = sget(fs_type, NULL, set_anon_super, NULL); |
1065 | |
1066 | if (IS_ERR(s)) |
1067 | return ERR_CAST(s); |
1068 | |
1069 | s->s_flags = flags; |
1070 | |
1071 | error = fill_super(s, data, flags & MS_SILENT ? 1 : 0); |
1072 | if (error) { |
1073 | deactivate_locked_super(s); |
1074 | return ERR_PTR(error); |
1075 | } |
1076 | s->s_flags |= MS_ACTIVE; |
1077 | return dget(s->s_root); |
1078 | } |
1079 | EXPORT_SYMBOL(mount_nodev); |
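/*
 * Illustrative sketch, not part of this file: virtual filesystems with no
 * backing device use mount_nodev() (or mount_single() below for one shared
 * instance), usually paired with kill_anon_super() or kill_litter_super()
 * in ->kill_sb. The myfs_* names are hypothetical:
 *
 *	static struct dentry *myfs_mount(struct file_system_type *fs_type,
 *		int flags, const char *dev_name, void *data)
 *	{
 *		return mount_nodev(fs_type, flags, data, myfs_fill_super);
 *	}
 *
 *	static struct file_system_type myfs_fs_type = {
 *		.owner		= THIS_MODULE,
 *		.name		= "myfs",
 *		.mount		= myfs_mount,
 *		.kill_sb	= kill_litter_super,
 *	};
 */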
1080 | |
1081 | static int compare_single(struct super_block *s, void *p) |
1082 | { |
1083 | return 1; |
1084 | } |
1085 | |
1086 | struct dentry *mount_single(struct file_system_type *fs_type, |
1087 | int flags, void *data, |
1088 | int (*fill_super)(struct super_block *, void *, int)) |
1089 | { |
1090 | struct super_block *s; |
1091 | int error; |
1092 | |
1093 | s = sget(fs_type, compare_single, set_anon_super, NULL); |
1094 | if (IS_ERR(s)) |
1095 | return ERR_CAST(s); |
1096 | if (!s->s_root) { |
1097 | s->s_flags = flags; |
1098 | error = fill_super(s, data, flags & MS_SILENT ? 1 : 0); |
1099 | if (error) { |
1100 | deactivate_locked_super(s); |
1101 | return ERR_PTR(error); |
1102 | } |
1103 | s->s_flags |= MS_ACTIVE; |
1104 | } else { |
1105 | do_remount_sb(s, flags, data, 0); |
1106 | } |
1107 | return dget(s->s_root); |
1108 | } |
1109 | EXPORT_SYMBOL(mount_single); |
1110 | |
1111 | struct dentry * |
1112 | mount_fs(struct file_system_type *type, int flags, const char *name, void *data) |
1113 | { |
1114 | struct dentry *root; |
1115 | struct super_block *sb; |
1116 | char *secdata = NULL; |
1117 | int error = -ENOMEM; |
1118 | |
1119 | if (data && !(type->fs_flags & FS_BINARY_MOUNTDATA)) { |
1120 | secdata = alloc_secdata(); |
1121 | if (!secdata) |
1122 | goto out; |
1123 | |
1124 | error = security_sb_copy_data(data, secdata); |
1125 | if (error) |
1126 | goto out_free_secdata; |
1127 | } |
1128 | |
1129 | root = type->mount(type, flags, name, data); |
1130 | if (IS_ERR(root)) { |
1131 | error = PTR_ERR(root); |
1132 | goto out_free_secdata; |
1133 | } |
1134 | sb = root->d_sb; |
1135 | BUG_ON(!sb); |
1136 | WARN_ON(!sb->s_bdi); |
1137 | WARN_ON(sb->s_bdi == &default_backing_dev_info); |
1138 | sb->s_flags |= MS_BORN; |
1139 | |
1140 | error = security_sb_kern_mount(sb, flags, secdata); |
1141 | if (error) |
1142 | goto out_sb; |
1143 | |
1144 | /* |
1145 | * filesystems should never set s_maxbytes larger than MAX_LFS_FILESIZE |
1146 | * but s_maxbytes was an unsigned long long for many releases. Throw |
1147 | * this warning for a little while to try and catch filesystems that |
1148 | * violate this rule. |
1149 | */ |
1150 | WARN((sb->s_maxbytes < 0), "%s set sb->s_maxbytes to " |
1151 | "negative value (%lld)\n", type->name, sb->s_maxbytes); |
1152 | |
1153 | up_write(&sb->s_umount); |
1154 | free_secdata(secdata); |
1155 | return root; |
1156 | out_sb: |
1157 | dput(root); |
1158 | deactivate_locked_super(sb); |
1159 | out_free_secdata: |
1160 | free_secdata(secdata); |
1161 | out: |
1162 | return ERR_PTR(error); |
1163 | } |
1164 | |
1165 | /** |
1166 | * freeze_super - lock the filesystem and force it into a consistent state |
1167 | * @sb: the super to lock |
1168 | * |
1169 | * Syncs the super to make sure the filesystem is consistent and calls the fs's |
1170 | * freeze_fs. Subsequent calls to this without first thawing the fs will return |
1171 | * -EBUSY. |
1172 | */ |
1173 | int freeze_super(struct super_block *sb) |
1174 | { |
1175 | int ret; |
1176 | |
1177 | atomic_inc(&sb->s_active); |
1178 | down_write(&sb->s_umount); |
1179 | if (sb->s_frozen) { |
1180 | deactivate_locked_super(sb); |
1181 | return -EBUSY; |
1182 | } |
1183 | |
1184 | if (!(sb->s_flags & MS_BORN)) { |
1185 | up_write(&sb->s_umount); |
1186 | return 0; /* sic - it's "nothing to do" */ |
1187 | } |
1188 | |
1189 | if (sb->s_flags & MS_RDONLY) { |
1190 | sb->s_frozen = SB_FREEZE_TRANS; |
1191 | smp_wmb(); |
1192 | up_write(&sb->s_umount); |
1193 | return 0; |
1194 | } |
1195 | |
1196 | sb->s_frozen = SB_FREEZE_WRITE; |
1197 | smp_wmb(); |
1198 | |
1199 | sync_filesystem(sb); |
1200 | |
1201 | sb->s_frozen = SB_FREEZE_TRANS; |
1202 | smp_wmb(); |
1203 | |
1204 | sync_blockdev(sb->s_bdev); |
1205 | if (sb->s_op->freeze_fs) { |
1206 | ret = sb->s_op->freeze_fs(sb); |
1207 | if (ret) { |
1208 | printk(KERN_ERR |
1209 | "VFS:Filesystem freeze failed\n"); |
1210 | sb->s_frozen = SB_UNFROZEN; |
1211 | smp_wmb(); |
1212 | wake_up(&sb->s_wait_unfrozen); |
1213 | deactivate_locked_super(sb); |
1214 | return ret; |
1215 | } |
1216 | } |
1217 | up_write(&sb->s_umount); |
1218 | return 0; |
1219 | } |
1220 | EXPORT_SYMBOL(freeze_super); |
1221 | |
1222 | /** |
1223 | * thaw_super -- unlock filesystem |
1224 | * @sb: the super to thaw |
1225 | * |
1226 | * Unlocks the filesystem and marks it writeable again after freeze_super(). |
1227 | */ |
1228 | int thaw_super(struct super_block *sb) |
1229 | { |
1230 | int error; |
1231 | |
1232 | down_write(&sb->s_umount); |
1233 | if (sb->s_frozen == SB_UNFROZEN) { |
1234 | up_write(&sb->s_umount); |
1235 | return -EINVAL; |
1236 | } |
1237 | |
1238 | if (sb->s_flags & MS_RDONLY) |
1239 | goto out; |
1240 | |
1241 | if (sb->s_op->unfreeze_fs) { |
1242 | error = sb->s_op->unfreeze_fs(sb); |
1243 | if (error) { |
1244 | printk(KERN_ERR |
1245 | "VFS:Filesystem thaw failed\n"); |
1246 | sb->s_frozen = SB_FREEZE_TRANS; |
1247 | up_write(&sb->s_umount); |
1248 | return error; |
1249 | } |
1250 | } |
1251 | |
1252 | out: |
1253 | sb->s_frozen = SB_UNFROZEN; |
1254 | smp_wmb(); |
1255 | wake_up(&sb->s_wait_unfrozen); |
1256 | deactivate_locked_super(sb); |
1257 | |
1258 | return 0; |
1259 | } |
1260 | EXPORT_SYMBOL(thaw_super); |
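/*
 * Illustrative sketch, not part of this file: freeze_super()/thaw_super()
 * are the pair behind the FIFREEZE/FITHAW ioctls and freeze_bdev(); a
 * caller uses them roughly like this:
 *
 *	int err = freeze_super(sb);
 *	if (err)
 *		return err;
 *	... take a snapshot, back up the block device, etc. ...
 *	err = thaw_super(sb);
 *
 * freeze_super() takes its own extra active reference and thaw_super()
 * drops it again via deactivate_locked_super(), so the filesystem stays
 * pinned for as long as it is frozen.
 */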
1261 |