    The text below describes the locking rules for VFS-related methods.
It is (believed to be) up-to-date. *Please*, if you change anything in
prototypes or locking protocols - update this file. And update the relevant
instances in the tree, don't leave that to maintainers of filesystems/devices/
etc. At the very least, put the list of dubious cases at the end of this file.
Don't turn it into a log - maintainers of out-of-the-tree code are supposed to
be able to use diff(1).
    Thing currently missing here: socket operations. Alexey?

--------------------------- dentry_operations --------------------------
prototypes:
    int (*d_revalidate)(struct dentry *, struct nameidata *);
    int (*d_hash)(const struct dentry *, const struct inode *,
            struct qstr *);
    int (*d_compare)(const struct dentry *, const struct inode *,
            const struct dentry *, const struct inode *,
            unsigned int, const char *, const struct qstr *);
    int (*d_delete)(struct dentry *);
    void (*d_release)(struct dentry *);
    void (*d_iput)(struct dentry *, struct inode *);
    char *(*d_dname)(struct dentry *dentry, char *buffer, int buflen);
    struct vfsmount *(*d_automount)(struct path *path);
    int (*d_manage)(struct dentry *, bool);

locking rules:
                rename_lock   ->d_lock   may block        rcu-walk
d_revalidate:   no            no         yes (ref-walk)   maybe
d_hash:         no            no         no               maybe
d_compare:      yes           no         no               maybe
d_delete:       no            yes        no               no
d_release:      no            no         yes              no
d_iput:         no            no         yes              no
d_dname:        no            no         no               no
d_automount:    no            no         yes              no
d_manage:       no            no         yes (ref-walk)   maybe

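    As an illustration of the "rcu-walk" column above, a minimal sketch (not
taken from any in-tree filesystem) of a ->d_revalidate() instance that may
need to block: since blocking is not allowed in rcu-walk mode, it returns
-ECHILD there so the call is repeated in ref-walk mode.

    static int example_d_revalidate(struct dentry *dentry, struct nameidata *nd)
    {
        if (nd && (nd->flags & LOOKUP_RCU))
            return -ECHILD;     /* must not block here; ask for ref-walk */

        /* ref-walk mode: blocking operations (I/O, taking mutexes) are fine */
        return 1;               /* dentry is still valid */
    }
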
--------------------------- inode_operations ---------------------------
prototypes:
    int (*create) (struct inode *,struct dentry *,int, struct nameidata *);
    struct dentry * (*lookup) (struct inode *,struct dentry *, struct nameidata *);
    int (*link) (struct dentry *,struct inode *,struct dentry *);
    int (*unlink) (struct inode *,struct dentry *);
    int (*symlink) (struct inode *,struct dentry *,const char *);
    int (*mkdir) (struct inode *,struct dentry *,int);
    int (*rmdir) (struct inode *,struct dentry *);
    int (*mknod) (struct inode *,struct dentry *,int,dev_t);
    int (*rename) (struct inode *, struct dentry *,
            struct inode *, struct dentry *);
    int (*readlink) (struct dentry *, char __user *,int);
    void * (*follow_link) (struct dentry *, struct nameidata *);
    void (*put_link) (struct dentry *, struct nameidata *, void *);
    void (*truncate) (struct inode *);
    int (*permission) (struct inode *, int, unsigned int);
    int (*check_acl)(struct inode *, int, unsigned int);
    int (*setattr) (struct dentry *, struct iattr *);
    int (*getattr) (struct vfsmount *, struct dentry *, struct kstat *);
    int (*setxattr) (struct dentry *, const char *,const void *,size_t,int);
    ssize_t (*getxattr) (struct dentry *, const char *, void *, size_t);
    ssize_t (*listxattr) (struct dentry *, char *, size_t);
    int (*removexattr) (struct dentry *, const char *);
    void (*truncate_range)(struct inode *, loff_t, loff_t);
    int (*fiemap)(struct inode *, struct fiemap_extent_info *, u64 start, u64 len);

locking rules:
    all may block
                i_mutex(inode)
lookup:         yes
create:         yes
link:           yes (both)
mknod:          yes
symlink:        yes
mkdir:          yes
unlink:         yes (both)
rmdir:          yes (both)  (see below)
rename:         yes (all)   (see below)
readlink:       no
follow_link:    no
put_link:       no
truncate:       yes         (see below)
setattr:        yes
permission:     no (may not block if called in rcu-walk mode)
check_acl:      no
getattr:        no
setxattr:       yes
getxattr:       no
listxattr:      no
removexattr:    yes
truncate_range: yes
fiemap:         no
    Additionally, ->rmdir(), ->unlink() and ->rename() have ->i_mutex on
the victim.
    cross-directory ->rename() has (per-superblock) ->s_vfs_rename_sem.
    ->truncate() is never called directly - it's a callback, not a
method. It's called by vmtruncate() - a deprecated library function used by
->setattr(). Locking information above applies to that call (i.e. is
inherited from ->setattr() - vmtruncate() is used when ATTR_SIZE has been
passed).
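
    For illustration only, a minimal ->setattr() sketch (hypothetical, not
from any particular filesystem) showing where vmtruncate() - and hence
->truncate() - gets called; the caller already holds i_mutex as per the
table above:

    static int example_setattr(struct dentry *dentry, struct iattr *attr)
    {
        struct inode *inode = dentry->d_inode;
        int err;

        err = inode_change_ok(inode, attr);
        if (err)
            return err;

        if (attr->ia_valid & ATTR_SIZE) {
            err = vmtruncate(inode, attr->ia_size); /* may call ->truncate() */
            if (err)
                return err;
        }
        setattr_copy(inode, attr);
        mark_inode_dirty(inode);
        return 0;
    }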

See Documentation/filesystems/directory-locking for more detailed discussion
of the locking scheme for directory operations.

--------------------------- super_operations ---------------------------
prototypes:
    struct inode *(*alloc_inode)(struct super_block *sb);
    void (*destroy_inode)(struct inode *);
    void (*dirty_inode) (struct inode *);
    int (*write_inode) (struct inode *, struct writeback_control *wbc);
    int (*drop_inode) (struct inode *);
    void (*evict_inode) (struct inode *);
    void (*put_super) (struct super_block *);
    void (*write_super) (struct super_block *);
    int (*sync_fs)(struct super_block *sb, int wait);
    int (*freeze_fs) (struct super_block *);
    int (*unfreeze_fs) (struct super_block *);
    int (*statfs) (struct dentry *, struct kstatfs *);
    int (*remount_fs) (struct super_block *, int *, char *);
    void (*umount_begin) (struct super_block *);
    int (*show_options)(struct seq_file *, struct vfsmount *);
    ssize_t (*quota_read)(struct super_block *, int, char *, size_t, loff_t);
    ssize_t (*quota_write)(struct super_block *, int, const char *, size_t, loff_t);
    int (*bdev_try_to_free_page)(struct super_block*, struct page*, gfp_t);

locking rules:
    All may block [not true, see below]
                            s_umount
alloc_inode:
destroy_inode:
dirty_inode:                            (must not sleep)
write_inode:
drop_inode:                             !!!inode_lock!!!
evict_inode:
put_super:                  write
write_super:                read
sync_fs:                    read
freeze_fs:                  read
unfreeze_fs:                read
statfs:                     maybe(read) (see below)
remount_fs:                 write
umount_begin:               no
show_options:               no          (namespace_sem)
quota_read:                 no          (see below)
quota_write:                no          (see below)
bdev_try_to_free_page:      no          (see below)

->statfs() has s_umount (shared) when called by ustat(2) (native or
compat), but that's an accident of bad API; s_umount is used to pin
the superblock down when we only have the dev_t given to us by userland to
identify the superblock. Everything else (statfs(), fstatfs(), etc.)
doesn't hold it when calling ->statfs() - the superblock is pinned down
by resolving the pathname passed to the syscall.
->quota_read() and ->quota_write() functions are both guaranteed to
be the only ones operating on the quota file by the quota code (via
dqio_sem) (unless an admin really wants to screw something up and
writes to quota files with quotas on). For other details about locking
see also the dquot_operations section.
->bdev_try_to_free_page is called from the ->releasepage handler of
the block device inode. See there for more details.

--------------------------- file_system_type ---------------------------
prototypes:
    int (*get_sb) (struct file_system_type *, int,
               const char *, void *, struct vfsmount *);
    struct dentry *(*mount) (struct file_system_type *, int,
               const char *, void *);
    void (*kill_sb) (struct super_block *);
locking rules:
            may block
get_sb      yes
mount       yes
kill_sb     yes

->get_sb() returns error or 0 with a locked superblock attached to the vfsmount
(exclusive on ->s_umount).
->mount() returns ERR_PTR or the root dentry.
->kill_sb() takes a write-locked superblock, does all shutdown work on it,
unlocks and drops the reference.

--------------------------- address_space_operations --------------------------
prototypes:
    int (*writepage)(struct page *page, struct writeback_control *wbc);
    int (*readpage)(struct file *, struct page *);
    int (*sync_page)(struct page *);
    int (*writepages)(struct address_space *, struct writeback_control *);
    int (*set_page_dirty)(struct page *page);
    int (*readpages)(struct file *filp, struct address_space *mapping,
            struct list_head *pages, unsigned nr_pages);
    int (*write_begin)(struct file *, struct address_space *mapping,
                loff_t pos, unsigned len, unsigned flags,
                struct page **pagep, void **fsdata);
    int (*write_end)(struct file *, struct address_space *mapping,
                loff_t pos, unsigned len, unsigned copied,
                struct page *page, void *fsdata);
    sector_t (*bmap)(struct address_space *, sector_t);
    int (*invalidatepage) (struct page *, unsigned long);
    int (*releasepage) (struct page *, int);
    void (*freepage)(struct page *);
    int (*direct_IO)(int, struct kiocb *, const struct iovec *iov,
            loff_t offset, unsigned long nr_segs);
    int (*get_xip_mem)(struct address_space *, pgoff_t, int, void **,
                unsigned long *);
    int (*migratepage)(struct address_space *, struct page *, struct page *);
    int (*launder_page)(struct page *);
    int (*is_partially_uptodate)(struct page *, read_descriptor_t *, unsigned long);
    int (*error_remove_page)(struct address_space *, struct page *);

locking rules:
    All except set_page_dirty and freepage may block

                            PageLocked(page)            i_mutex
writepage:                  yes, unlocks (see below)
readpage:                   yes, unlocks
sync_page:                  maybe
writepages:
set_page_dirty:             no
readpages:
write_begin:                locks the page              yes
write_end:                  yes, unlocks                yes
bmap:
invalidatepage:             yes
releasepage:                yes
freepage:                   yes
direct_IO:
get_xip_mem:                maybe
migratepage:                yes (both)
launder_page:               yes
is_partially_uptodate:      yes
error_remove_page:          yes

    ->write_begin(), ->write_end(), ->sync_page() and ->readpage()
may be called from the request handler (/dev/loop).

    ->readpage() unlocks the page, either synchronously or via I/O
completion.
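
    As an example of the simplest ->readpage() honouring that rule, a hedged
sketch for a block-based filesystem; example_get_block is a hypothetical
get_block_t helper, and mpage_readpage() takes care of submitting the read
and unlocking the page on I/O completion:

    static int example_readpage(struct file *file, struct page *page)
    {
        /* submits read I/O; the page is unlocked when the I/O completes */
        return mpage_readpage(page, example_get_block);
    }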

    ->readpages() populates the pagecache with the passed pages and starts
I/O against them. They come unlocked upon I/O completion.

    ->writepage() is used for two purposes: for "memory cleansing" and for
"sync". These are quite different operations and the behaviour may differ
depending upon the mode.

If writepage is called for sync (wbc->sync_mode != WB_SYNC_NONE) then
it *must* start I/O against the page, even if that would involve
blocking on in-progress I/O.

If writepage is called for memory cleansing (sync_mode ==
WB_SYNC_NONE) then its role is to get as much writeout underway as
possible. So writepage should try to avoid blocking against
currently-in-progress I/O.

If the filesystem is not called for "sync" and it determines that it
would need to block against in-progress I/O to be able to start new I/O
against the page, the filesystem should redirty the page with
redirty_page_for_writepage(), then unlock the page and return zero.
This may also be done to avoid internal deadlocks, but rarely.

If the filesystem is called for sync then it must wait on any
in-progress I/O and then start new I/O.

The filesystem should unlock the page synchronously, before returning to the
caller, unless ->writepage() returns the special AOP_WRITEPAGE_ACTIVATE
value. AOP_WRITEPAGE_ACTIVATE means that the page cannot really be written out
currently, and the VM should stop calling ->writepage() on this page for some
time. The VM does this by moving the page to the head of the active list, hence
the name.

Unless the filesystem is going to redirty_page_for_writepage(), unlock the page
and return zero, writepage *must* run set_page_writeback() against the page,
followed by unlocking it. Once set_page_writeback() has been run against the
page, write I/O can be submitted and the write I/O completion handler must run
end_page_writeback() once the I/O is complete. If no I/O is submitted, the
filesystem must run end_page_writeback() against the page before returning from
writepage.

That is: after 2.5.12, pages which are under writeout are *not* locked. Note,
if the filesystem needs the page to be locked during writeout, that is ok, too,
the page is allowed to be unlocked at any point in time between the calls to
set_page_writeback() and end_page_writeback().

Note, failure to run either redirty_page_for_writepage() or the combination of
set_page_writeback()/end_page_writeback() on a page submitted to writepage
will leave the page itself marked clean but it will be tagged as dirty in the
radix tree. This incoherency can lead to all sorts of hard-to-debug problems
in the filesystem like having dirty inodes at umount and losing written data.
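
Putting the above together, a minimal ->writepage() sketch; the helpers
example_would_block() and example_submit_write() are hypothetical stand-ins
for filesystem-specific code:

    static int example_writepage(struct page *page, struct writeback_control *wbc)
    {
        if (wbc->sync_mode == WB_SYNC_NONE && example_would_block(page)) {
            /* memory cleansing: do not block on in-progress I/O */
            redirty_page_for_writepage(wbc, page);
            unlock_page(page);
            return 0;
        }

        set_page_writeback(page);
        unlock_page(page);              /* pages under writeout are not locked */

        if (example_submit_write(page) < 0)
            /* no I/O was submitted, so writeback must be ended right here */
            end_page_writeback(page);
        /* otherwise the I/O completion handler calls end_page_writeback() */
        return 0;
    }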

    ->sync_page() locking rules are not well-defined - usually it is called
with the page locked, but that is not guaranteed. Considering the currently
existing instances of this method, ->sync_page() itself doesn't look
well-defined...

    ->writepages() is used for periodic writeback and for syscall-initiated
sync operations. The address_space should start I/O against at least
*nr_to_write pages. *nr_to_write must be decremented for each page which is
written. The address_space implementation may write more (or less) pages
than *nr_to_write asks for, but it should try to be reasonably close. If
nr_to_write is NULL, all dirty pages must be written.

writepages should _only_ write pages which are present on
mapping->io_pages.
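
    A hedged ->writepages() sketch built on write_cache_pages(), which walks
the dirty pages and decrements wbc->nr_to_write for each page written;
example_writepage_cb is a hypothetical per-page callback with the
writepage_t signature:

    static int example_writepages(struct address_space *mapping,
                                  struct writeback_control *wbc)
    {
        return write_cache_pages(mapping, wbc, example_writepage_cb, NULL);
    }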

    ->set_page_dirty() is called from various places in the kernel
when the target page is marked as needing writeback. It may be called
under spinlock (it cannot block) and is sometimes called with the page
not locked.

    ->bmap() is currently used by legacy ioctl() (FIBMAP) provided by some
filesystems and by the swapper. The latter will eventually go away. Please,
keep it that way and don't breed new callers.

    ->invalidatepage() is called when the filesystem must attempt to drop
some or all of the buffers from the page when it is being truncated. It
returns zero on success. If ->invalidatepage is zero, the kernel uses
block_invalidatepage() instead.

    ->releasepage() is called when the kernel is about to try to drop the
buffers from the page in preparation for freeing it. It returns zero to
indicate that the buffers are (or may be) freeable. If ->releasepage is zero,
the kernel assumes that the fs has no private interest in the buffers.

    ->freepage() is called when the kernel is done dropping the page
from the page cache.

    ->launder_page() may be called prior to releasing a page if
it is still found to be dirty. It returns zero if the page was successfully
cleaned, or an error value if not. Note that in order to prevent the page
getting mapped back in and redirtied, it needs to be kept locked
across the entire operation.

----------------------- file_lock_operations ------------------------------
prototypes:
    void (*fl_copy_lock)(struct file_lock *, struct file_lock *);
    void (*fl_release_private)(struct file_lock *);


locking rules:
                        file_lock_lock      may block
fl_copy_lock:           yes                 no
fl_release_private:     maybe               no

----------------------- lock_manager_operations ---------------------------
prototypes:
    int (*fl_compare_owner)(struct file_lock *, struct file_lock *);
    void (*fl_notify)(struct file_lock *); /* unblock callback */
    int (*fl_grant)(struct file_lock *, struct file_lock *, int);
    void (*fl_release_private)(struct file_lock *);
    void (*fl_break)(struct file_lock *); /* break_lease callback */
    int (*fl_change)(struct file_lock **, int);

locking rules:
                        file_lock_lock      may block
fl_compare_owner:       yes                 no
fl_notify:              yes                 no
fl_grant:               no                  no
fl_release_private:     maybe               no
fl_break:               yes                 no
fl_change:              yes                 no

--------------------------- buffer_head -----------------------------------
prototypes:
    void (*b_end_io)(struct buffer_head *bh, int uptodate);

locking rules:
    called from interrupts. In other words, extreme care is needed here.
bh is locked, but that's the only guarantee we have here. Currently only RAID1,
highmem, fs/buffer.c, and fs/ntfs/aops.c provide these. Block devices
call this method upon I/O completion.

--------------------------- block_device_operations -----------------------
prototypes:
    int (*open) (struct block_device *, fmode_t);
    int (*release) (struct gendisk *, fmode_t);
    int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
    int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
    int (*direct_access) (struct block_device *, sector_t, void **, unsigned long *);
    int (*media_changed) (struct gendisk *);
    void (*unlock_native_capacity) (struct gendisk *);
    int (*revalidate_disk) (struct gendisk *);
    int (*getgeo)(struct block_device *, struct hd_geometry *);
    void (*swap_slot_free_notify) (struct block_device *, unsigned long);

locking rules:
                            bd_mutex
open:                       yes
release:                    yes
ioctl:                      no
compat_ioctl:               no
direct_access:              no
media_changed:              no
unlock_native_capacity:     no
revalidate_disk:            no
getgeo:                     no
swap_slot_free_notify:      no  (see below)

media_changed, unlock_native_capacity and revalidate_disk are called only from
check_disk_change().

swap_slot_free_notify is called with swap_lock and sometimes the page lock
held.


--------------------------- file_operations -------------------------------
prototypes:
    loff_t (*llseek) (struct file *, loff_t, int);
    ssize_t (*read) (struct file *, char __user *, size_t, loff_t *);
    ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *);
    ssize_t (*aio_read) (struct kiocb *, const struct iovec *, unsigned long, loff_t);
    ssize_t (*aio_write) (struct kiocb *, const struct iovec *, unsigned long, loff_t);
    int (*readdir) (struct file *, void *, filldir_t);
    unsigned int (*poll) (struct file *, struct poll_table_struct *);
    long (*unlocked_ioctl) (struct file *, unsigned int, unsigned long);
    long (*compat_ioctl) (struct file *, unsigned int, unsigned long);
    int (*mmap) (struct file *, struct vm_area_struct *);
    int (*open) (struct inode *, struct file *);
    int (*flush) (struct file *);
    int (*release) (struct inode *, struct file *);
    int (*fsync) (struct file *, int datasync);
    int (*aio_fsync) (struct kiocb *, int datasync);
    int (*fasync) (int, struct file *, int);
    int (*lock) (struct file *, int, struct file_lock *);
    ssize_t (*readv) (struct file *, const struct iovec *, unsigned long,
            loff_t *);
    ssize_t (*writev) (struct file *, const struct iovec *, unsigned long,
            loff_t *);
    ssize_t (*sendfile) (struct file *, loff_t *, size_t, read_actor_t,
            void __user *);
    ssize_t (*sendpage) (struct file *, struct page *, int, size_t,
            loff_t *, int);
    unsigned long (*get_unmapped_area)(struct file *, unsigned long,
            unsigned long, unsigned long, unsigned long);
    int (*check_flags)(int);
    int (*flock) (struct file *, int, struct file_lock *);
    ssize_t (*splice_write)(struct pipe_inode_info *, struct file *, loff_t *,
            size_t, unsigned int);
    ssize_t (*splice_read)(struct file *, loff_t *, struct pipe_inode_info *,
            size_t, unsigned int);
    int (*setlease)(struct file *, long, struct file_lock **);
    long (*fallocate)(struct file *, int, loff_t, loff_t);

locking rules:
    All may block except for ->setlease.
    No VFS locks held on entry except for ->fsync and ->setlease.

->fsync() has i_mutex on inode.

->setlease has the file_list_lock held and must not sleep.

->llseek() locking has moved from llseek to the individual llseek
implementations. If your fs is not using generic_file_llseek, you
need to acquire and release the appropriate locks in your ->llseek().
For many filesystems, it is probably safe to acquire the inode
mutex or just to use i_size_read() instead.
Note: this does not protect the file->f_pos against concurrent modifications
since this is something userspace has to take care of.
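
A hedged sketch of such an ->llseek() taking i_mutex, as suggested above
(note that file->f_pos itself is still not protected against concurrent
updates):

    static loff_t example_llseek(struct file *file, loff_t offset, int origin)
    {
        struct inode *inode = file->f_mapping->host;
        loff_t ret;

        mutex_lock(&inode->i_mutex);
        switch (origin) {
        case SEEK_END:
            offset += i_size_read(inode);
            break;
        case SEEK_CUR:
            offset += file->f_pos;
            break;
        }
        if (offset < 0) {
            ret = -EINVAL;
        } else {
            file->f_pos = offset;       /* not protected against races */
            ret = offset;
        }
        mutex_unlock(&inode->i_mutex);
        return ret;
    }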

->fasync() is responsible for maintaining the FASYNC bit in filp->f_flags.
Most instances call fasync_helper(), which does that maintenance, so it's
not normally something one needs to worry about. Return values > 0 will be
mapped to zero in the VFS layer.
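
A typical instance, sketched here with a hypothetical private structure
holding the fasync_struct pointer, just delegates to fasync_helper():

    static int example_fasync(int fd, struct file *filp, int on)
    {
        struct example_private *priv = filp->private_data;  /* hypothetical */

        /*
         * fasync_helper() does the FASYNC/f_flags and list maintenance;
         * a positive return value is mapped to zero by the VFS.
         */
        return fasync_helper(fd, filp, on, &priv->fasync_queue);
    }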

->readdir() and ->ioctl() on directories must be changed. Ideally we would
move ->readdir() to inode_operations and use a separate method for directory
->ioctl() or kill the latter completely. One of the problems is that for
anything that resembles union-mount we won't have a struct file for all
components. And there are other reasons why the current interface is a mess...

->read on directories probably must go away - we should just enforce -EISDIR
in sys_read() and friends.

--------------------------- dquot_operations -------------------------------
prototypes:
    int (*write_dquot) (struct dquot *);
    int (*acquire_dquot) (struct dquot *);
    int (*release_dquot) (struct dquot *);
    int (*mark_dirty) (struct dquot *);
    int (*write_info) (struct super_block *, int);

These operations are intended to be more or less wrapping functions that ensure
proper locking with respect to the filesystem and call the generic quota operations.

What the filesystem should expect from the generic quota functions:

                    FS recursion    Held locks when called
write_dquot:        yes             dqonoff_sem or dqptr_sem
acquire_dquot:      yes             dqonoff_sem or dqptr_sem
release_dquot:      yes             dqonoff_sem or dqptr_sem
mark_dirty:         no              -
write_info:         yes             dqonoff_sem

FS recursion means calling ->quota_read() and ->quota_write() from superblock
operations.

More details about quota locking can be found in fs/quota/dquot.c.

--------------------------- vm_operations_struct -----------------------------
prototypes:
    void (*open)(struct vm_area_struct*);
    void (*close)(struct vm_area_struct*);
    int (*fault)(struct vm_area_struct*, struct vm_fault *);
    int (*page_mkwrite)(struct vm_area_struct *, struct vm_fault *);
    int (*access)(struct vm_area_struct *, unsigned long, void*, int, int);

locking rules:
                    mmap_sem    PageLocked(page)
open:               yes
close:              yes
fault:              yes         can return with page locked
page_mkwrite:       yes         can return with page locked
access:             yes

    ->fault() is called when a previously not present pte is about
to be faulted in. The filesystem must find and return the page associated
with the passed in "pgoff" in the vm_fault structure. If it is possible that
the page may be truncated and/or invalidated, then the filesystem must lock
the page, then ensure it is not already truncated (the page lock will block
subsequent truncate), and then return with VM_FAULT_LOCKED, and the page
locked. The VM will unlock the page.
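
    A hedged ->fault() sketch following those rules; example_find_page() is a
hypothetical helper returning a referenced pagecache page (or NULL):

    static int example_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
    {
        struct address_space *mapping = vma->vm_file->f_mapping;
        struct page *page;

    retry:
        page = example_find_page(mapping, vmf->pgoff);
        if (!page)
            return VM_FAULT_SIGBUS;

        lock_page(page);
        /* the page lock blocks further truncate, so re-check under it */
        if (page->mapping != mapping) {
            unlock_page(page);
            page_cache_release(page);
            goto retry;                 /* truncated meanwhile; look it up again */
        }

        vmf->page = page;
        return VM_FAULT_LOCKED;         /* the VM will unlock the page */
    }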

    ->page_mkwrite() is called when a previously read-only pte is
about to become writeable. The filesystem again must ensure that there are
no truncate/invalidate races, and then return with the page locked. If
the page has been truncated, the filesystem should not look up a new page
like the ->fault() handler, but simply return with VM_FAULT_NOPAGE, which
will cause the VM to retry the fault.
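
    And a matching ->page_mkwrite() sketch (the block-reserving step is
filesystem-specific and only hinted at here):

    static int example_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
    {
        struct page *page = vmf->page;
        struct inode *inode = vma->vm_file->f_mapping->host;

        lock_page(page);
        if (page->mapping != inode->i_mapping) {
            /* page was truncated: do not look up a new one here */
            unlock_page(page);
            return VM_FAULT_NOPAGE;     /* the VM will retry the fault */
        }

        /* ... reserve blocks / prepare the page for writing (fs-specific) ... */

        return VM_FAULT_LOCKED;         /* keep the page locked to block truncate */
    }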

    ->access() is called when get_user_pages() fails in
access_process_vm(), typically used to debug a process through
/proc/pid/mem or ptrace. This function is needed only for
VM_IO | VM_PFNMAP VMAs.

================================================================================
            Dubious stuff

(if you break something or notice that it is broken and do not fix it yourself
- at least put it here)

