/*
 *  linux/fs/block_dev.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *  Copyright (C) 2001  Andrea Arcangeli <andrea@suse.de> SuSE
 */

#include <linux/init.h>
#include <linux/mm.h>
#include <linux/fcntl.h>
#include <linux/slab.h>
#include <linux/kmod.h>
#include <linux/major.h>
#include <linux/device_cgroup.h>
#include <linux/highmem.h>
#include <linux/blkdev.h>
#include <linux/module.h>
#include <linux/blkpg.h>
#include <linux/magic.h>
#include <linux/buffer_head.h>
#include <linux/swap.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>
#include <linux/mpage.h>
#include <linux/mount.h>
#include <linux/uio.h>
#include <linux/namei.h>
#include <linux/log2.h>
#include <linux/cleancache.h>
#include <linux/aio.h>
#include <asm/uaccess.h>
#include "internal.h"

struct bdev_inode {
	struct block_device bdev;
	struct inode vfs_inode;
};

static const struct address_space_operations def_blk_aops;

static inline struct bdev_inode *BDEV_I(struct inode *inode)
{
	return container_of(inode, struct bdev_inode, vfs_inode);
}

inline struct block_device *I_BDEV(struct inode *inode)
{
	return &BDEV_I(inode)->bdev;
}
EXPORT_SYMBOL(I_BDEV);
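
/*
 * Illustrative note (not part of the original file): because blkdev_open()
 * below points filp->f_mapping at the bdev pseudo-inode's mapping, code
 * holding an open block device file can always reach the block_device via
 *
 *	struct block_device *bdev = I_BDEV(file->f_mapping->host);
 *
 * which is exactly how blkdev_close() and block_ioctl() below do it.
 */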

/*
 * Move the inode from its current bdi to a new bdi.  Make sure the inode
 * is clean before moving so that it doesn't linger on the old bdi.
 */
static void bdev_inode_switch_bdi(struct inode *inode,
			struct backing_dev_info *dst)
{
	while (true) {
		spin_lock(&inode->i_lock);
		if (!(inode->i_state & I_DIRTY)) {
			inode->i_data.backing_dev_info = dst;
			spin_unlock(&inode->i_lock);
			return;
		}
		spin_unlock(&inode->i_lock);
		WARN_ON_ONCE(write_inode_now(inode, true));
	}
}

/* Kill _all_ buffers and pagecache, dirty or not. */
void kill_bdev(struct block_device *bdev)
{
	struct address_space *mapping = bdev->bd_inode->i_mapping;

	if (mapping->nrpages == 0 && mapping->nrshadows == 0)
		return;

	invalidate_bh_lrus();
	truncate_inode_pages(mapping, 0);
}
EXPORT_SYMBOL(kill_bdev);

/* Invalidate clean unused buffers and pagecache. */
void invalidate_bdev(struct block_device *bdev)
{
	struct address_space *mapping = bdev->bd_inode->i_mapping;

	if (mapping->nrpages == 0)
		return;

	invalidate_bh_lrus();
	lru_add_drain_all();	/* make sure all lru add caches are flushed */
	invalidate_mapping_pages(mapping, 0, -1);
	/* 99% of the time, we don't need to flush the cleancache on the bdev.
	 * But, for the strange corners, let's be cautious.
	 */
	cleancache_invalidate_inode(mapping);
}
EXPORT_SYMBOL(invalidate_bdev);

int set_blocksize(struct block_device *bdev, int size)
{
	/* Size must be a power of two, and between 512 and PAGE_SIZE */
	if (size > PAGE_SIZE || size < 512 || !is_power_of_2(size))
		return -EINVAL;

	/* Size cannot be smaller than the size supported by the device */
	if (size < bdev_logical_block_size(bdev))
		return -EINVAL;

	/* Don't change the size if it is the same as the current one */
	if (bdev->bd_block_size != size) {
		sync_blockdev(bdev);
		bdev->bd_block_size = size;
		bdev->bd_inode->i_blkbits = blksize_bits(size);
		kill_bdev(bdev);
	}
	return 0;
}

EXPORT_SYMBOL(set_blocksize);
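
/*
 * Illustrative sketch (not part of the original file): a caller that wants
 * to read metadata in 4KiB units might do
 *
 *	if (set_blocksize(bdev, 4096))
 *		return -EINVAL;
 *
 * The call fails if 4096 exceeds PAGE_SIZE or is smaller than the device's
 * logical block size; on success the device's page cache is synced and then
 * killed, so it must not race with active users of the old block size.
 */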

int sb_set_blocksize(struct super_block *sb, int size)
{
	if (set_blocksize(sb->s_bdev, size))
		return 0;
	/* If we get here, we know size is a power of two
	 * and its value is between 512 and PAGE_SIZE */
	sb->s_blocksize = size;
	sb->s_blocksize_bits = blksize_bits(size);
	return sb->s_blocksize;
}

EXPORT_SYMBOL(sb_set_blocksize);

int sb_min_blocksize(struct super_block *sb, int size)
{
	int minsize = bdev_logical_block_size(sb->s_bdev);
	if (size < minsize)
		size = minsize;
	return sb_set_blocksize(sb, size);
}

EXPORT_SYMBOL(sb_min_blocksize);
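
/*
 * Illustrative sketch (not part of the original file): a typical
 * fill_super callback starts with
 *
 *	int blocksize = sb_min_blocksize(sb, 1024);
 *	if (!blocksize)
 *		return -EINVAL;
 *
 * sb_min_blocksize() rounds the requested 1024 bytes up to the device's
 * logical block size if that is larger, and returns 0 on failure.
 */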

static int
blkdev_get_block(struct inode *inode, sector_t iblock,
		struct buffer_head *bh, int create)
{
	bh->b_bdev = I_BDEV(inode);
	bh->b_blocknr = iblock;
	set_buffer_mapped(bh);
	return 0;
}

static ssize_t
blkdev_direct_IO(int rw, struct kiocb *iocb, struct iov_iter *iter,
			loff_t offset)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;

	return __blockdev_direct_IO(rw, iocb, inode, I_BDEV(inode), iter,
				    offset, blkdev_get_block,
				    NULL, NULL, 0);
}

int __sync_blockdev(struct block_device *bdev, int wait)
{
	if (!bdev)
		return 0;
	if (!wait)
		return filemap_flush(bdev->bd_inode->i_mapping);
	return filemap_write_and_wait(bdev->bd_inode->i_mapping);
}

/*
 * Write out and wait upon all the dirty data associated with a block
 * device via its mapping.  Does not take the superblock lock.
 */
int sync_blockdev(struct block_device *bdev)
{
	return __sync_blockdev(bdev, 1);
}
EXPORT_SYMBOL(sync_blockdev);

/*
 * Write out and wait upon all dirty data associated with this
 * device.  Filesystem data as well as the underlying block
 * device.  Takes the superblock lock.
 */
int fsync_bdev(struct block_device *bdev)
{
	struct super_block *sb = get_super(bdev);
	if (sb) {
		int res = sync_filesystem(sb);
		drop_super(sb);
		return res;
	}
	return sync_blockdev(bdev);
}
EXPORT_SYMBOL(fsync_bdev);

/**
 * freeze_bdev  --  lock a filesystem and force it into a consistent state
 * @bdev:	blockdevice to lock
 *
 * If a superblock is found on this device, we take the s_umount semaphore
 * on it to make sure nobody unmounts until the snapshot creation is done.
 * The reference counter (bd_fsfreeze_count) guarantees that when multiple
 * freeze requests arrive simultaneously, only the last matching unfreeze
 * actually thaws the frozen filesystem.  The counter is incremented in
 * freeze_bdev() and decremented in thaw_bdev(); thaw_bdev() only really
 * unfreezes once it reaches 0.
 */
struct super_block *freeze_bdev(struct block_device *bdev)
{
	struct super_block *sb;
	int error = 0;

	mutex_lock(&bdev->bd_fsfreeze_mutex);
	if (++bdev->bd_fsfreeze_count > 1) {
		/*
		 * We don't even need to grab a reference - the first call
		 * to freeze_bdev grabs an active reference and only the last
		 * thaw_bdev drops it.
		 */
		sb = get_super(bdev);
		drop_super(sb);
		mutex_unlock(&bdev->bd_fsfreeze_mutex);
		return sb;
	}

	sb = get_active_super(bdev);
	if (!sb)
		goto out;
	error = freeze_super(sb);
	if (error) {
		deactivate_super(sb);
		bdev->bd_fsfreeze_count--;
		mutex_unlock(&bdev->bd_fsfreeze_mutex);
		return ERR_PTR(error);
	}
	deactivate_super(sb);
out:
	sync_blockdev(bdev);
	mutex_unlock(&bdev->bd_fsfreeze_mutex);
	return sb;	/* thaw_bdev releases s->s_umount */
}
EXPORT_SYMBOL(freeze_bdev);

/**
 * thaw_bdev  -- unlock filesystem
 * @bdev:	blockdevice to unlock
 * @sb:		associated superblock
 *
 * Unlocks the filesystem and marks it writeable again after freeze_bdev().
 */
int thaw_bdev(struct block_device *bdev, struct super_block *sb)
{
	int error = -EINVAL;

	mutex_lock(&bdev->bd_fsfreeze_mutex);
	if (!bdev->bd_fsfreeze_count)
		goto out;

	error = 0;
	if (--bdev->bd_fsfreeze_count > 0)
		goto out;

	if (!sb)
		goto out;

	error = thaw_super(sb);
	if (error) {
		bdev->bd_fsfreeze_count++;
		mutex_unlock(&bdev->bd_fsfreeze_mutex);
		return error;
	}
out:
	mutex_unlock(&bdev->bd_fsfreeze_mutex);
	return 0;
}
EXPORT_SYMBOL(thaw_bdev);
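
/*
 * Illustrative sketch (not part of the original file): a snapshot driver
 * would bracket its work with the pair above:
 *
 *	struct super_block *sb = freeze_bdev(bdev);
 *	if (IS_ERR(sb))
 *		return PTR_ERR(sb);
 *	... take the snapshot; the filesystem, if any, is quiesced ...
 *	thaw_bdev(bdev, sb);
 *
 * A NULL @sb from freeze_bdev() just means no filesystem was mounted;
 * the pair is still balanced via bd_fsfreeze_count.
 */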

static int blkdev_writepage(struct page *page, struct writeback_control *wbc)
{
	return block_write_full_page(page, blkdev_get_block, wbc);
}

static int blkdev_readpage(struct file * file, struct page * page)
{
	return block_read_full_page(page, blkdev_get_block);
}

static int blkdev_readpages(struct file *file, struct address_space *mapping,
			struct list_head *pages, unsigned nr_pages)
{
	return mpage_readpages(mapping, pages, nr_pages, blkdev_get_block);
}

static int blkdev_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	return block_write_begin(mapping, pos, len, flags, pagep,
				 blkdev_get_block);
}

static int blkdev_write_end(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	int ret;
	ret = block_write_end(file, mapping, pos, len, copied, page, fsdata);

	unlock_page(page);
	page_cache_release(page);

	return ret;
}

/*
 * private llseek:
 * for a block special file file_inode(file)->i_size is zero
 * so we compute the size by hand (just as in block_read/write above)
 */
static loff_t block_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *bd_inode = file->f_mapping->host;
	loff_t retval;

	mutex_lock(&bd_inode->i_mutex);
	retval = fixed_size_llseek(file, offset, whence, i_size_read(bd_inode));
	mutex_unlock(&bd_inode->i_mutex);
	return retval;
}

int blkdev_fsync(struct file *filp, loff_t start, loff_t end, int datasync)
{
	struct inode *bd_inode = filp->f_mapping->host;
	struct block_device *bdev = I_BDEV(bd_inode);
	int error;

	error = filemap_write_and_wait_range(filp->f_mapping, start, end);
	if (error)
		return error;

	/*
	 * There is no need to serialise calls to blkdev_issue_flush with
	 * i_mutex and doing so causes performance issues with concurrent
	 * O_SYNC writers to a block device.
	 */
	error = blkdev_issue_flush(bdev, GFP_KERNEL, NULL);
	if (error == -EOPNOTSUPP)
		error = 0;

	return error;
}
EXPORT_SYMBOL(blkdev_fsync);

/**
 * bdev_read_page() - Start reading a page from a block device
 * @bdev: The device to read the page from
 * @sector: The offset on the device to read the page from (need not be aligned)
 * @page: The page to read
 *
 * On entry, the page should be locked.  It will be unlocked when the page
 * has been read.  If the block driver implements rw_page synchronously,
 * that will be true on exit from this function, but it need not be.
 *
 * Errors returned by this function are usually "soft", eg out of memory, or
 * queue full; callers should try a different route to read this page rather
 * than propagate an error back up the stack.
 *
 * Return: negative errno if an error occurs, 0 if submission was successful.
 */
int bdev_read_page(struct block_device *bdev, sector_t sector,
			struct page *page)
{
	const struct block_device_operations *ops = bdev->bd_disk->fops;
	if (!ops->rw_page)
		return -EOPNOTSUPP;
	return ops->rw_page(bdev, sector + get_start_sect(bdev), page, READ);
}
EXPORT_SYMBOL_GPL(bdev_read_page);
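
/*
 * Illustrative sketch (not part of the original file) of the driver side of
 * this interface; the names below are hypothetical.  A driver that can move
 * a page without building a struct bio (e.g. the brd ramdisk) supplies
 * ->rw_page in its block_device_operations and completes the page with
 * page_endio():
 *
 *	static int mydrv_rw_page(struct block_device *bdev, sector_t sector,
 *				 struct page *page, int rw)
 *	{
 *		int err = mydrv_do_page(bdev->bd_disk->private_data,
 *					page, sector, rw);
 *		page_endio(page, rw, err);
 *		return err;
 *	}
 *
 *	static const struct block_device_operations mydrv_fops = {
 *		.owner		= THIS_MODULE,
 *		.rw_page	= mydrv_rw_page,
 *	};
 */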

/**
 * bdev_write_page() - Start writing a page to a block device
 * @bdev: The device to write the page to
 * @sector: The offset on the device to write the page to (need not be aligned)
 * @page: The page to write
 * @wbc: The writeback_control for the write
 *
 * On entry, the page should be locked and not currently under writeback.
 * On exit, if the write started successfully, the page will be unlocked and
 * under writeback.  If the write failed already (eg the driver failed to
 * queue the page to the device), the page will still be locked.  If the
 * caller is a ->writepage implementation, it will need to unlock the page.
 *
 * Errors returned by this function are usually "soft", eg out of memory, or
 * queue full; callers should try a different route to write this page rather
 * than propagate an error back up the stack.
 *
 * Return: negative errno if an error occurs, 0 if submission was successful.
 */
int bdev_write_page(struct block_device *bdev, sector_t sector,
			struct page *page, struct writeback_control *wbc)
{
	int result;
	int rw = (wbc->sync_mode == WB_SYNC_ALL) ? WRITE_SYNC : WRITE;
	const struct block_device_operations *ops = bdev->bd_disk->fops;
	if (!ops->rw_page)
		return -EOPNOTSUPP;
	set_page_writeback(page);
	result = ops->rw_page(bdev, sector + get_start_sect(bdev), page, rw);
	if (result)
		end_page_writeback(page);
	else
		unlock_page(page);
	return result;
}
EXPORT_SYMBOL_GPL(bdev_write_page);

/*
 * pseudo-fs
 */

static __cacheline_aligned_in_smp DEFINE_SPINLOCK(bdev_lock);
static struct kmem_cache * bdev_cachep __read_mostly;

static struct inode *bdev_alloc_inode(struct super_block *sb)
{
	struct bdev_inode *ei = kmem_cache_alloc(bdev_cachep, GFP_KERNEL);
	if (!ei)
		return NULL;
	return &ei->vfs_inode;
}

static void bdev_i_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);
	struct bdev_inode *bdi = BDEV_I(inode);

	kmem_cache_free(bdev_cachep, bdi);
}

static void bdev_destroy_inode(struct inode *inode)
{
	call_rcu(&inode->i_rcu, bdev_i_callback);
}

static void init_once(void *foo)
{
	struct bdev_inode *ei = (struct bdev_inode *) foo;
	struct block_device *bdev = &ei->bdev;

	memset(bdev, 0, sizeof(*bdev));
	mutex_init(&bdev->bd_mutex);
	INIT_LIST_HEAD(&bdev->bd_inodes);
	INIT_LIST_HEAD(&bdev->bd_list);
#ifdef CONFIG_SYSFS
	INIT_LIST_HEAD(&bdev->bd_holder_disks);
#endif
	inode_init_once(&ei->vfs_inode);
	/* Initialize mutex for freeze. */
	mutex_init(&bdev->bd_fsfreeze_mutex);
}

static inline void __bd_forget(struct inode *inode)
{
	list_del_init(&inode->i_devices);
	inode->i_bdev = NULL;
	inode->i_mapping = &inode->i_data;
}

static void bdev_evict_inode(struct inode *inode)
{
	struct block_device *bdev = &BDEV_I(inode)->bdev;
	struct list_head *p;
	truncate_inode_pages_final(&inode->i_data);
	invalidate_inode_buffers(inode); /* is it needed here? */
	clear_inode(inode);
	spin_lock(&bdev_lock);
	while ( (p = bdev->bd_inodes.next) != &bdev->bd_inodes ) {
		__bd_forget(list_entry(p, struct inode, i_devices));
	}
	list_del_init(&bdev->bd_list);
	spin_unlock(&bdev_lock);
}

static const struct super_operations bdev_sops = {
	.statfs = simple_statfs,
	.alloc_inode = bdev_alloc_inode,
	.destroy_inode = bdev_destroy_inode,
	.drop_inode = generic_delete_inode,
	.evict_inode = bdev_evict_inode,
};

static struct dentry *bd_mount(struct file_system_type *fs_type,
	int flags, const char *dev_name, void *data)
{
	return mount_pseudo(fs_type, "bdev:", &bdev_sops, NULL, BDEVFS_MAGIC);
}

static struct file_system_type bd_type = {
	.name		= "bdev",
	.mount		= bd_mount,
	.kill_sb	= kill_anon_super,
};

static struct super_block *blockdev_superblock __read_mostly;

void __init bdev_cache_init(void)
{
	int err;
	static struct vfsmount *bd_mnt;

	bdev_cachep = kmem_cache_create("bdev_cache", sizeof(struct bdev_inode),
			0, (SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT|
				SLAB_MEM_SPREAD|SLAB_PANIC),
			init_once);
	err = register_filesystem(&bd_type);
	if (err)
		panic("Cannot register bdev pseudo-fs");
	bd_mnt = kern_mount(&bd_type);
	if (IS_ERR(bd_mnt))
		panic("Cannot create bdev pseudo-fs");
	blockdev_superblock = bd_mnt->mnt_sb;	/* For writeback */
}

/*
 * Most likely _very_ bad one - but then it's hardly critical for small
 * /dev and can be fixed when somebody needs a really large one.
 * Keep in mind that it will be fed through the icache hash function too.
 */
static inline unsigned long hash(dev_t dev)
{
	return MAJOR(dev)+MINOR(dev);
}
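
/*
 * Illustrative note (not part of the original file): collisions are common
 * with this hash, e.g. dev_t 8:1 (sda1) and 7:2 (loop2) both hash to 9.
 * iget5_locked() below disambiguates by running bdev_test() over the hash
 * chain, so collisions cost extra comparisons, not correctness.
 */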

static int bdev_test(struct inode *inode, void *data)
{
	return BDEV_I(inode)->bdev.bd_dev == *(dev_t *)data;
}

static int bdev_set(struct inode *inode, void *data)
{
	BDEV_I(inode)->bdev.bd_dev = *(dev_t *)data;
	return 0;
}

static LIST_HEAD(all_bdevs);

struct block_device *bdget(dev_t dev)
{
	struct block_device *bdev;
	struct inode *inode;

	inode = iget5_locked(blockdev_superblock, hash(dev),
			bdev_test, bdev_set, &dev);

	if (!inode)
		return NULL;

	bdev = &BDEV_I(inode)->bdev;

	if (inode->i_state & I_NEW) {
		bdev->bd_contains = NULL;
		bdev->bd_super = NULL;
		bdev->bd_inode = inode;
		bdev->bd_block_size = (1 << inode->i_blkbits);
		bdev->bd_part_count = 0;
		bdev->bd_invalidated = 0;
		inode->i_mode = S_IFBLK;
		inode->i_rdev = dev;
		inode->i_bdev = bdev;
		inode->i_data.a_ops = &def_blk_aops;
		mapping_set_gfp_mask(&inode->i_data, GFP_USER);
		inode->i_data.backing_dev_info = &default_backing_dev_info;
		spin_lock(&bdev_lock);
		list_add(&bdev->bd_list, &all_bdevs);
		spin_unlock(&bdev_lock);
		unlock_new_inode(inode);
	}
	return bdev;
}

EXPORT_SYMBOL(bdget);

/**
 * bdgrab -- Grab a reference to an already referenced block device
 * @bdev:	Block device to grab a reference to.
 */
struct block_device *bdgrab(struct block_device *bdev)
{
	ihold(bdev->bd_inode);
	return bdev;
}
EXPORT_SYMBOL(bdgrab);

long nr_blockdev_pages(void)
{
	struct block_device *bdev;
	long ret = 0;
	spin_lock(&bdev_lock);
	list_for_each_entry(bdev, &all_bdevs, bd_list) {
		ret += bdev->bd_inode->i_mapping->nrpages;
	}
	spin_unlock(&bdev_lock);
	return ret;
}

void bdput(struct block_device *bdev)
{
	iput(bdev->bd_inode);
}

EXPORT_SYMBOL(bdput);
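
/*
 * Illustrative sketch (not part of the original file): bdget()/bdput() only
 * manage the pseudo-inode reference, they do not open the device:
 *
 *	struct block_device *bdev = bdget(MKDEV(8, 0));
 *	if (!bdev)
 *		return -ENOMEM;
 *	... inspect bdev ...
 *	bdput(bdev);
 *
 * To actually open the device, hand the reference to blkdev_get(), which
 * consumes it on failure and leaves the count unchanged on success.
 */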

static struct block_device *bd_acquire(struct inode *inode)
{
	struct block_device *bdev;

	spin_lock(&bdev_lock);
	bdev = inode->i_bdev;
	if (bdev) {
		ihold(bdev->bd_inode);
		spin_unlock(&bdev_lock);
		return bdev;
	}
	spin_unlock(&bdev_lock);

	bdev = bdget(inode->i_rdev);
	if (bdev) {
		spin_lock(&bdev_lock);
		if (!inode->i_bdev) {
			/*
			 * We take an additional reference to bd_inode,
			 * and it's released in clear_inode() of inode.
			 * So, we can access it via ->i_mapping always
			 * without igrab().
			 */
			ihold(bdev->bd_inode);
			inode->i_bdev = bdev;
			inode->i_mapping = bdev->bd_inode->i_mapping;
			list_add(&inode->i_devices, &bdev->bd_inodes);
		}
		spin_unlock(&bdev_lock);
	}
	return bdev;
}

int sb_is_blkdev_sb(struct super_block *sb)
{
	return sb == blockdev_superblock;
}

/* Call when you free an inode */

void bd_forget(struct inode *inode)
{
	struct block_device *bdev = NULL;

	spin_lock(&bdev_lock);
	if (!sb_is_blkdev_sb(inode->i_sb))
		bdev = inode->i_bdev;
	__bd_forget(inode);
	spin_unlock(&bdev_lock);

	if (bdev)
		iput(bdev->bd_inode);
}

/**
 * bd_may_claim - test whether a block device can be claimed
 * @bdev: block device of interest
 * @whole: whole block device containing @bdev, may equal @bdev
 * @holder: holder trying to claim @bdev
 *
 * Test whether @bdev can be claimed by @holder.
 *
 * CONTEXT:
 * spin_lock(&bdev_lock).
 *
 * RETURNS:
 * %true if @bdev can be claimed, %false otherwise.
 */
static bool bd_may_claim(struct block_device *bdev, struct block_device *whole,
			 void *holder)
{
	if (bdev->bd_holder == holder)
		return true;	 /* already a holder */
	else if (bdev->bd_holder != NULL)
		return false;	 /* held by someone else */
	else if (bdev->bd_contains == bdev)
		return true;	 /* is a whole device which isn't held */

	else if (whole->bd_holder == bd_may_claim)
		return true;	 /* is a partition of a device that is being partitioned */
	else if (whole->bd_holder != NULL)
		return false;	 /* is a partition of a held device */
	else
		return true;	 /* is a partition of an un-held device */
}

/**
 * bd_prepare_to_claim - prepare to claim a block device
 * @bdev: block device of interest
 * @whole: the whole device containing @bdev, may equal @bdev
 * @holder: holder trying to claim @bdev
 *
 * Prepare to claim @bdev.  This function fails if @bdev is already
 * claimed by another holder and waits if another claiming is in
 * progress.  This function doesn't actually claim.  On successful
 * return, the caller has ownership of bd_claiming and bd_holder[s].
 *
 * CONTEXT:
 * spin_lock(&bdev_lock).  Might release bdev_lock, sleep and regrab
 * it multiple times.
 *
 * RETURNS:
 * 0 if @bdev can be claimed, -EBUSY otherwise.
 */
static int bd_prepare_to_claim(struct block_device *bdev,
			       struct block_device *whole, void *holder)
{
retry:
	/* if someone else claimed, fail */
	if (!bd_may_claim(bdev, whole, holder))
		return -EBUSY;

	/* if claiming is already in progress, wait for it to finish */
	if (whole->bd_claiming) {
		wait_queue_head_t *wq = bit_waitqueue(&whole->bd_claiming, 0);
		DEFINE_WAIT(wait);

		prepare_to_wait(wq, &wait, TASK_UNINTERRUPTIBLE);
		spin_unlock(&bdev_lock);
		schedule();
		finish_wait(wq, &wait);
		spin_lock(&bdev_lock);
		goto retry;
	}

	/* yay, all mine */
	return 0;
}

/**
 * bd_start_claiming - start claiming a block device
 * @bdev: block device of interest
 * @holder: holder trying to claim @bdev
 *
 * @bdev is about to be opened exclusively.  Check @bdev can be opened
 * exclusively and mark that an exclusive open is in progress.  Each
 * successful call to this function must be matched with a call to
 * either bd_finish_claiming() or bd_abort_claiming() (which do not
 * fail).
 *
 * This function is used to gain exclusive access to the block device
 * without actually causing other exclusive open attempts to fail.  It
 * should be used when the open sequence itself requires exclusive
 * access but may subsequently fail.
 *
 * CONTEXT:
 * Might sleep.
 *
 * RETURNS:
 * Pointer to the block device containing @bdev on success, ERR_PTR()
 * value on failure.
 */
static struct block_device *bd_start_claiming(struct block_device *bdev,
					      void *holder)
{
	struct gendisk *disk;
	struct block_device *whole;
	int partno, err;

	might_sleep();

	/*
	 * @bdev might not have been initialized properly yet, look up
	 * and grab the outer block device the hard way.
	 */
	disk = get_gendisk(bdev->bd_dev, &partno);
	if (!disk)
		return ERR_PTR(-ENXIO);

	/*
	 * Normally, @bdev should equal what's returned from bdget_disk()
	 * if partno is 0; however, some drivers (floppy) use multiple
	 * bdev's for the same physical device and @bdev may be one of the
	 * aliases.  Keep @bdev if partno is 0.  This means claimer
	 * tracking is broken for those devices but it has always been that
	 * way.
	 */
	if (partno)
		whole = bdget_disk(disk, 0);
	else
		whole = bdgrab(bdev);

	module_put(disk->fops->owner);
	put_disk(disk);
	if (!whole)
		return ERR_PTR(-ENOMEM);

	/* prepare to claim, if successful, mark claiming in progress */
	spin_lock(&bdev_lock);

	err = bd_prepare_to_claim(bdev, whole, holder);
	if (err == 0) {
		whole->bd_claiming = holder;
		spin_unlock(&bdev_lock);
		return whole;
	} else {
		spin_unlock(&bdev_lock);
		bdput(whole);
		return ERR_PTR(err);
	}
}

#ifdef CONFIG_SYSFS
struct bd_holder_disk {
	struct list_head	list;
	struct gendisk		*disk;
	int			refcnt;
};

static struct bd_holder_disk *bd_find_holder_disk(struct block_device *bdev,
						  struct gendisk *disk)
{
	struct bd_holder_disk *holder;

	list_for_each_entry(holder, &bdev->bd_holder_disks, list)
		if (holder->disk == disk)
			return holder;
	return NULL;
}

static int add_symlink(struct kobject *from, struct kobject *to)
{
	return sysfs_create_link(from, to, kobject_name(to));
}

static void del_symlink(struct kobject *from, struct kobject *to)
{
	sysfs_remove_link(from, kobject_name(to));
}

/**
 * bd_link_disk_holder - create symlinks between holding disk and slave bdev
 * @bdev: the claimed slave bdev
 * @disk: the holding disk
 *
 * DON'T USE THIS UNLESS YOU'RE ALREADY USING IT.
 *
 * This function creates the following sysfs symlinks.
 *
 * - from "slaves" directory of the holder @disk to the claimed @bdev
 * - from "holders" directory of the @bdev to the holder @disk
 *
 * For example, if /dev/dm-0 maps to /dev/sda and disk for dm-0 is
 * passed to bd_link_disk_holder(), then:
 *
 *   /sys/block/dm-0/slaves/sda --> /sys/block/sda
 *   /sys/block/sda/holders/dm-0 --> /sys/block/dm-0
 *
 * The caller must have claimed @bdev before calling this function and
 * ensure that both @bdev and @disk are valid during the creation and
 * lifetime of these symlinks.
 *
 * CONTEXT:
 * Might sleep.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int bd_link_disk_holder(struct block_device *bdev, struct gendisk *disk)
{
	struct bd_holder_disk *holder;
	int ret = 0;

	mutex_lock(&bdev->bd_mutex);

	WARN_ON_ONCE(!bdev->bd_holder);

	/* FIXME: remove the following once add_disk() handles errors */
	if (WARN_ON(!disk->slave_dir || !bdev->bd_part->holder_dir))
		goto out_unlock;

	holder = bd_find_holder_disk(bdev, disk);
	if (holder) {
		holder->refcnt++;
		goto out_unlock;
	}

	holder = kzalloc(sizeof(*holder), GFP_KERNEL);
	if (!holder) {
		ret = -ENOMEM;
		goto out_unlock;
	}

	INIT_LIST_HEAD(&holder->list);
	holder->disk = disk;
	holder->refcnt = 1;

	ret = add_symlink(disk->slave_dir, &part_to_dev(bdev->bd_part)->kobj);
	if (ret)
		goto out_free;

	ret = add_symlink(bdev->bd_part->holder_dir, &disk_to_dev(disk)->kobj);
	if (ret)
		goto out_del;
	/*
	 * bdev could be deleted beneath us which would implicitly destroy
	 * the holder directory.  Hold on to it.
	 */
	kobject_get(bdev->bd_part->holder_dir);

	list_add(&holder->list, &bdev->bd_holder_disks);
	goto out_unlock;

out_del:
	del_symlink(disk->slave_dir, &part_to_dev(bdev->bd_part)->kobj);
out_free:
	kfree(holder);
out_unlock:
	mutex_unlock(&bdev->bd_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(bd_link_disk_holder);

/**
 * bd_unlink_disk_holder - destroy symlinks created by bd_link_disk_holder()
 * @bdev: the claimed slave bdev
 * @disk: the holding disk
 *
 * DON'T USE THIS UNLESS YOU'RE ALREADY USING IT.
 *
 * CONTEXT:
 * Might sleep.
 */
void bd_unlink_disk_holder(struct block_device *bdev, struct gendisk *disk)
{
	struct bd_holder_disk *holder;

	mutex_lock(&bdev->bd_mutex);

	holder = bd_find_holder_disk(bdev, disk);

	if (!WARN_ON_ONCE(holder == NULL) && !--holder->refcnt) {
		del_symlink(disk->slave_dir, &part_to_dev(bdev->bd_part)->kobj);
		del_symlink(bdev->bd_part->holder_dir,
			    &disk_to_dev(disk)->kobj);
		kobject_put(bdev->bd_part->holder_dir);
		list_del_init(&holder->list);
		kfree(holder);
	}

	mutex_unlock(&bdev->bd_mutex);
}
EXPORT_SYMBOL_GPL(bd_unlink_disk_holder);
#endif

/**
 * flush_disk - invalidates all buffer-cache entries on a disk
 *
 * @bdev: struct block device to be flushed
 * @kill_dirty: flag to guide handling of dirty inodes
 *
 * Invalidates all buffer-cache entries on a disk.  It should be called
 * when a disk has been changed -- either by a media change or online
 * resize.
 */
static void flush_disk(struct block_device *bdev, bool kill_dirty)
{
	if (__invalidate_device(bdev, kill_dirty)) {
		char name[BDEVNAME_SIZE] = "";

		if (bdev->bd_disk)
			disk_name(bdev->bd_disk, 0, name);
		printk(KERN_WARNING "VFS: busy inodes on changed media or "
		       "resized disk %s\n", name);
	}

	if (!bdev->bd_disk)
		return;
	if (disk_part_scan_enabled(bdev->bd_disk))
		bdev->bd_invalidated = 1;
}

/**
 * check_disk_size_change - checks for disk size change and adjusts bdev size.
 * @disk: struct gendisk to check
 * @bdev: struct bdev to adjust.
 *
 * This routine checks whether the bdev size matches the disk size and
 * adjusts it if it differs.
 */
void check_disk_size_change(struct gendisk *disk, struct block_device *bdev)
{
	loff_t disk_size, bdev_size;

	disk_size = (loff_t)get_capacity(disk) << 9;
	bdev_size = i_size_read(bdev->bd_inode);
	if (disk_size != bdev_size) {
		char name[BDEVNAME_SIZE];

		disk_name(disk, 0, name);
		printk(KERN_INFO
		       "%s: detected capacity change from %lld to %lld\n",
		       name, bdev_size, disk_size);
		i_size_write(bdev->bd_inode, disk_size);
		flush_disk(bdev, false);
	}
}
EXPORT_SYMBOL(check_disk_size_change);

/**
 * revalidate_disk - wrapper for lower-level driver's revalidate_disk call-back
 * @disk: struct gendisk to be revalidated
 *
 * This routine is a wrapper for lower-level driver's revalidate_disk
 * call-backs.  It is used to do common pre and post operations needed
 * for all revalidate_disk operations.
 */
int revalidate_disk(struct gendisk *disk)
{
	struct block_device *bdev;
	int ret = 0;

	if (disk->fops->revalidate_disk)
		ret = disk->fops->revalidate_disk(disk);

	bdev = bdget_disk(disk, 0);
	if (!bdev)
		return ret;

	mutex_lock(&bdev->bd_mutex);
	check_disk_size_change(disk, bdev);
	bdev->bd_invalidated = 0;
	mutex_unlock(&bdev->bd_mutex);
	bdput(bdev);
	return ret;
}
EXPORT_SYMBOL(revalidate_disk);

/*
 * This routine checks whether a removable medium has been changed,
 * and invalidates all buffer-cache entries in that case.  This
 * is a relatively slow routine, so we have to try to minimize using
 * it.  Thus it is called only upon a 'mount' or 'open'.  This
 * is the best way of combining speed and utility, I think.
 * People changing diskettes in the middle of an operation deserve
 * to lose :-)
 */
int check_disk_change(struct block_device *bdev)
{
	struct gendisk *disk = bdev->bd_disk;
	const struct block_device_operations *bdops = disk->fops;
	unsigned int events;

	events = disk_clear_events(disk, DISK_EVENT_MEDIA_CHANGE |
				   DISK_EVENT_EJECT_REQUEST);
	if (!(events & DISK_EVENT_MEDIA_CHANGE))
		return 0;

	flush_disk(bdev, true);
	if (bdops->revalidate_disk)
		bdops->revalidate_disk(bdev->bd_disk);
	return 1;
}

EXPORT_SYMBOL(check_disk_change);

void bd_set_size(struct block_device *bdev, loff_t size)
{
	unsigned bsize = bdev_logical_block_size(bdev);

	mutex_lock(&bdev->bd_inode->i_mutex);
	i_size_write(bdev->bd_inode, size);
	mutex_unlock(&bdev->bd_inode->i_mutex);
	while (bsize < PAGE_CACHE_SIZE) {
		if (size & bsize)
			break;
		bsize <<= 1;
	}
	bdev->bd_block_size = bsize;
	bdev->bd_inode->i_blkbits = blksize_bits(bsize);
}
EXPORT_SYMBOL(bd_set_size);
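
/*
 * Illustrative example (not part of the original file) of the loop in
 * bd_set_size(): with size = 7168 (fourteen 512-byte sectors) and a
 * 512-byte logical block size, bsize doubles once (7168 & 512 == 0) and
 * stops at 1024 because 7168 & 1024 != 0.  So bd_block_size becomes the
 * largest power of two, capped at PAGE_CACHE_SIZE, that divides the
 * device size.
 */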

static void __blkdev_put(struct block_device *bdev, fmode_t mode, int for_part);

/*
 * bd_mutex locking:
 *
 *  mutex_lock(part->bd_mutex)
 *    mutex_lock_nested(whole->bd_mutex, 1)
 */

static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
{
	struct gendisk *disk;
	struct module *owner;
	int ret;
	int partno;
	int perm = 0;

	if (mode & FMODE_READ)
		perm |= MAY_READ;
	if (mode & FMODE_WRITE)
		perm |= MAY_WRITE;
	/*
	 * hooks: /n/, see "layering violations".
	 */
	if (!for_part) {
		ret = devcgroup_inode_permission(bdev->bd_inode, perm);
		if (ret != 0) {
			bdput(bdev);
			return ret;
		}
	}

restart:

	ret = -ENXIO;
	disk = get_gendisk(bdev->bd_dev, &partno);
	if (!disk)
		goto out;
	owner = disk->fops->owner;

	disk_block_events(disk);
	mutex_lock_nested(&bdev->bd_mutex, for_part);
	if (!bdev->bd_openers) {
		bdev->bd_disk = disk;
		bdev->bd_queue = disk->queue;
		bdev->bd_contains = bdev;
		if (!partno) {
			struct backing_dev_info *bdi;

			ret = -ENXIO;
			bdev->bd_part = disk_get_part(disk, partno);
			if (!bdev->bd_part)
				goto out_clear;

			ret = 0;
			if (disk->fops->open) {
				ret = disk->fops->open(bdev, mode);
				if (ret == -ERESTARTSYS) {
					/* Lost a race with 'disk' being
					 * deleted, try again.
					 * See md.c
					 */
					disk_put_part(bdev->bd_part);
					bdev->bd_part = NULL;
					bdev->bd_disk = NULL;
					bdev->bd_queue = NULL;
					mutex_unlock(&bdev->bd_mutex);
					disk_unblock_events(disk);
					put_disk(disk);
					module_put(owner);
					goto restart;
				}
			}

			if (!ret) {
				bd_set_size(bdev,(loff_t)get_capacity(disk)<<9);
				bdi = blk_get_backing_dev_info(bdev);
				bdev_inode_switch_bdi(bdev->bd_inode, bdi);
			}

			/*
			 * If the device is invalidated, rescan partition
			 * if open succeeded or failed with -ENOMEDIUM.
			 * The latter is necessary to prevent ghost
			 * partitions on a removed medium.
			 */
			if (bdev->bd_invalidated) {
				if (!ret)
					rescan_partitions(disk, bdev);
				else if (ret == -ENOMEDIUM)
					invalidate_partitions(disk, bdev);
			}
			if (ret)
				goto out_clear;
		} else {
			struct block_device *whole;
			whole = bdget_disk(disk, 0);
			ret = -ENOMEM;
			if (!whole)
				goto out_clear;
			BUG_ON(for_part);
			ret = __blkdev_get(whole, mode, 1);
			if (ret)
				goto out_clear;
			bdev->bd_contains = whole;
			bdev_inode_switch_bdi(bdev->bd_inode,
				whole->bd_inode->i_data.backing_dev_info);
			bdev->bd_part = disk_get_part(disk, partno);
			if (!(disk->flags & GENHD_FL_UP) ||
			    !bdev->bd_part || !bdev->bd_part->nr_sects) {
				ret = -ENXIO;
				goto out_clear;
			}
			bd_set_size(bdev, (loff_t)bdev->bd_part->nr_sects << 9);
		}
	} else {
		if (bdev->bd_contains == bdev) {
			ret = 0;
			if (bdev->bd_disk->fops->open)
				ret = bdev->bd_disk->fops->open(bdev, mode);
			/* the same as first opener case, read comment there */
			if (bdev->bd_invalidated) {
				if (!ret)
					rescan_partitions(bdev->bd_disk, bdev);
				else if (ret == -ENOMEDIUM)
					invalidate_partitions(bdev->bd_disk, bdev);
			}
			if (ret)
				goto out_unlock_bdev;
		}
		/* only one opener holds refs to the module and disk */
		put_disk(disk);
		module_put(owner);
	}
	bdev->bd_openers++;
	if (for_part)
		bdev->bd_part_count++;
	mutex_unlock(&bdev->bd_mutex);
	disk_unblock_events(disk);
	return 0;

out_clear:
	disk_put_part(bdev->bd_part);
	bdev->bd_disk = NULL;
	bdev->bd_part = NULL;
	bdev->bd_queue = NULL;
	bdev_inode_switch_bdi(bdev->bd_inode, &default_backing_dev_info);
	if (bdev != bdev->bd_contains)
		__blkdev_put(bdev->bd_contains, mode, 1);
	bdev->bd_contains = NULL;
out_unlock_bdev:
	mutex_unlock(&bdev->bd_mutex);
	disk_unblock_events(disk);
	put_disk(disk);
	module_put(owner);
out:
	bdput(bdev);

	return ret;
}

/**
 * blkdev_get - open a block device
 * @bdev: block_device to open
 * @mode: FMODE_* mask
 * @holder: exclusive holder identifier
 *
 * Open @bdev with @mode.  If @mode includes %FMODE_EXCL, @bdev is
 * opened with exclusive access.  Specifying %FMODE_EXCL with a %NULL
 * @holder is invalid.  Exclusive opens may nest for the same @holder.
 *
 * On success, the reference count of @bdev is unchanged.  On failure,
 * @bdev is put.
 *
 * CONTEXT:
 * Might sleep.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int blkdev_get(struct block_device *bdev, fmode_t mode, void *holder)
{
	struct block_device *whole = NULL;
	int res;

	WARN_ON_ONCE((mode & FMODE_EXCL) && !holder);

	if ((mode & FMODE_EXCL) && holder) {
		whole = bd_start_claiming(bdev, holder);
		if (IS_ERR(whole)) {
			bdput(bdev);
			return PTR_ERR(whole);
		}
	}

	res = __blkdev_get(bdev, mode, 0);

	if (whole) {
		struct gendisk *disk = whole->bd_disk;

		/* finish claiming */
		mutex_lock(&bdev->bd_mutex);
		spin_lock(&bdev_lock);

		if (!res) {
			BUG_ON(!bd_may_claim(bdev, whole, holder));
			/*
			 * Note that for a whole device bd_holders
			 * will be incremented twice, and bd_holder
			 * will be set to bd_may_claim before being
			 * set to holder
			 */
			whole->bd_holders++;
			whole->bd_holder = bd_may_claim;
			bdev->bd_holders++;
			bdev->bd_holder = holder;
		}

		/* tell others that we're done */
		BUG_ON(whole->bd_claiming != holder);
		whole->bd_claiming = NULL;
		wake_up_bit(&whole->bd_claiming, 0);

		spin_unlock(&bdev_lock);

		/*
		 * Block event polling for write claims if requested.  Any
		 * write holder makes the write_holder state stick until
		 * all are released.  This is good enough and tracking
		 * individual writeable reference is too fragile given the
		 * way @mode is used in blkdev_get/put().
		 */
		if (!res && (mode & FMODE_WRITE) && !bdev->bd_write_holder &&
		    (disk->flags & GENHD_FL_BLOCK_EVENTS_ON_EXCL_WRITE)) {
			bdev->bd_write_holder = true;
			disk_block_events(disk);
		}

		mutex_unlock(&bdev->bd_mutex);
		bdput(whole);
	}

	return res;
}
EXPORT_SYMBOL(blkdev_get);

/**
 * blkdev_get_by_path - open a block device by name
 * @path: path to the block device to open
 * @mode: FMODE_* mask
 * @holder: exclusive holder identifier
 *
 * Open the blockdevice described by the device file at @path.  @mode
 * and @holder are identical to blkdev_get().
 *
 * On success, the returned block_device has reference count of one.
 *
 * CONTEXT:
 * Might sleep.
 *
 * RETURNS:
 * Pointer to block_device on success, ERR_PTR(-errno) on failure.
 */
struct block_device *blkdev_get_by_path(const char *path, fmode_t mode,
					void *holder)
{
	struct block_device *bdev;
	int err;

	bdev = lookup_bdev(path);
	if (IS_ERR(bdev))
		return bdev;

	err = blkdev_get(bdev, mode, holder);
	if (err)
		return ERR_PTR(err);

	if ((mode & FMODE_WRITE) && bdev_read_only(bdev)) {
		blkdev_put(bdev, mode);
		return ERR_PTR(-EACCES);
	}

	return bdev;
}
EXPORT_SYMBOL(blkdev_get_by_path);
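
/*
 * Illustrative sketch (not part of the original file): a filesystem or
 * stacking driver claiming its backing device.  @holder only needs to be
 * a unique, stable cookie; filesystems conventionally pass their
 * file_system_type or super_block:
 *
 *	bdev = blkdev_get_by_path("/dev/vdb1",
 *				  FMODE_READ | FMODE_WRITE | FMODE_EXCL,
 *				  holder);
 *	if (IS_ERR(bdev))
 *		return PTR_ERR(bdev);
 *	...
 *	blkdev_put(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
 *
 * The mode passed to blkdev_put() must match the one used to open,
 * FMODE_EXCL included, so the claim is dropped exactly once.
 */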

/**
 * blkdev_get_by_dev - open a block device by device number
 * @dev: device number of block device to open
 * @mode: FMODE_* mask
 * @holder: exclusive holder identifier
 *
 * Open the blockdevice described by device number @dev.  @mode and
 * @holder are identical to blkdev_get().
 *
 * Use it ONLY if you really do not have anything better - i.e. when
 * you are behind a truly sucky interface and all you are given is a
 * device number.  _Never_ to be used for internal purposes.  If you
 * ever need it - reconsider your API.
 *
 * On success, the returned block_device has reference count of one.
 *
 * CONTEXT:
 * Might sleep.
 *
 * RETURNS:
 * Pointer to block_device on success, ERR_PTR(-errno) on failure.
 */
struct block_device *blkdev_get_by_dev(dev_t dev, fmode_t mode, void *holder)
{
	struct block_device *bdev;
	int err;

	bdev = bdget(dev);
	if (!bdev)
		return ERR_PTR(-ENOMEM);

	err = blkdev_get(bdev, mode, holder);
	if (err)
		return ERR_PTR(err);

	return bdev;
}
EXPORT_SYMBOL(blkdev_get_by_dev);

static int blkdev_open(struct inode * inode, struct file * filp)
{
	struct block_device *bdev;

	/*
	 * Preserve backwards compatibility and allow large file access
	 * even if userspace doesn't ask for it explicitly.  Some mkfs
	 * binaries need it.  We might want to drop this workaround
	 * during an unstable branch.
	 */
	filp->f_flags |= O_LARGEFILE;

	if (filp->f_flags & O_NDELAY)
		filp->f_mode |= FMODE_NDELAY;
	if (filp->f_flags & O_EXCL)
		filp->f_mode |= FMODE_EXCL;
	if ((filp->f_flags & O_ACCMODE) == 3)
		filp->f_mode |= FMODE_WRITE_IOCTL;

	bdev = bd_acquire(inode);
	if (bdev == NULL)
		return -ENOMEM;

	filp->f_mapping = bdev->bd_inode->i_mapping;

	return blkdev_get(bdev, filp->f_mode, filp);
}

static void __blkdev_put(struct block_device *bdev, fmode_t mode, int for_part)
{
	struct gendisk *disk = bdev->bd_disk;
	struct block_device *victim = NULL;

	mutex_lock_nested(&bdev->bd_mutex, for_part);
	if (for_part)
		bdev->bd_part_count--;

	if (!--bdev->bd_openers) {
		WARN_ON_ONCE(bdev->bd_holders);
		sync_blockdev(bdev);
		kill_bdev(bdev);
		/* ->release can cause the old bdi to disappear,
		 * so must switch it out first
		 */
		bdev_inode_switch_bdi(bdev->bd_inode,
					&default_backing_dev_info);
	}
	if (bdev->bd_contains == bdev) {
		if (disk->fops->release)
			disk->fops->release(disk, mode);
	}
	if (!bdev->bd_openers) {
		struct module *owner = disk->fops->owner;

		disk_put_part(bdev->bd_part);
		bdev->bd_part = NULL;
		bdev->bd_disk = NULL;
		if (bdev != bdev->bd_contains)
			victim = bdev->bd_contains;
		bdev->bd_contains = NULL;

		put_disk(disk);
		module_put(owner);
	}
	mutex_unlock(&bdev->bd_mutex);
	bdput(bdev);
	if (victim)
		__blkdev_put(victim, mode, 1);
}

void blkdev_put(struct block_device *bdev, fmode_t mode)
{
	mutex_lock(&bdev->bd_mutex);

	if (mode & FMODE_EXCL) {
		bool bdev_free;

		/*
		 * Release a claim on the device.  The holder fields
		 * are protected with bdev_lock.  bd_mutex is to
		 * synchronize disk_holder unlinking.
		 */
		spin_lock(&bdev_lock);

		WARN_ON_ONCE(--bdev->bd_holders < 0);
		WARN_ON_ONCE(--bdev->bd_contains->bd_holders < 0);

		/* bd_contains might point to self, check in a separate step */
		if ((bdev_free = !bdev->bd_holders))
			bdev->bd_holder = NULL;
		if (!bdev->bd_contains->bd_holders)
			bdev->bd_contains->bd_holder = NULL;

		spin_unlock(&bdev_lock);

		/*
		 * If this was the last claim, remove the holder link and
		 * unblock disk event polling if it was a write holder.
		 */
		if (bdev_free && bdev->bd_write_holder) {
			disk_unblock_events(bdev->bd_disk);
			bdev->bd_write_holder = false;
		}
	}

	/*
	 * Trigger event checking and tell drivers to flush MEDIA_CHANGE
	 * event.  This is to ensure detection of media removal commanded
	 * from userland - e.g. eject(1).
	 */
	disk_flush_events(bdev->bd_disk, DISK_EVENT_MEDIA_CHANGE);

	mutex_unlock(&bdev->bd_mutex);

	__blkdev_put(bdev, mode, 0);
}
EXPORT_SYMBOL(blkdev_put);

static int blkdev_close(struct inode * inode, struct file * filp)
{
	struct block_device *bdev = I_BDEV(filp->f_mapping->host);
	blkdev_put(bdev, filp->f_mode);
	return 0;
}

static long block_ioctl(struct file *file, unsigned cmd, unsigned long arg)
{
	struct block_device *bdev = I_BDEV(file->f_mapping->host);
	fmode_t mode = file->f_mode;

	/*
	 * O_NDELAY can be altered using fcntl(.., F_SETFL, ..), so we have
	 * to update it before every ioctl.
	 */
	if (file->f_flags & O_NDELAY)
		mode |= FMODE_NDELAY;
	else
		mode &= ~FMODE_NDELAY;

	return blkdev_ioctl(bdev, mode, cmd, arg);
}

/*
 * Write data to the block device.  Only intended for the block device itself
 * and the raw driver which basically is a fake block device.
 *
 * Does not take i_mutex for the write and thus is not for general purpose
 * use.
 */
ssize_t blkdev_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct blk_plug plug;
	ssize_t ret;

	blk_start_plug(&plug);
	ret = __generic_file_write_iter(iocb, from);
	if (ret > 0) {
		ssize_t err;
		err = generic_write_sync(file, iocb->ki_pos - ret, ret);
		if (err < 0)
			ret = err;
	}
	blk_finish_plug(&plug);
	return ret;
}
EXPORT_SYMBOL_GPL(blkdev_write_iter);

ssize_t blkdev_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct file *file = iocb->ki_filp;
	struct inode *bd_inode = file->f_mapping->host;
	loff_t size = i_size_read(bd_inode);
	loff_t pos = iocb->ki_pos;

	if (pos >= size)
		return 0;

	size -= pos;
	iov_iter_truncate(to, size);
	return generic_file_read_iter(iocb, to);
}
EXPORT_SYMBOL_GPL(blkdev_read_iter);

/*
 * Try to release a page associated with block device when the system
 * is under memory pressure.
 */
static int blkdev_releasepage(struct page *page, gfp_t wait)
{
	struct super_block *super = BDEV_I(page->mapping->host)->bdev.bd_super;

	if (super && super->s_op->bdev_try_to_free_page)
		return super->s_op->bdev_try_to_free_page(super, page, wait);

	return try_to_free_buffers(page);
}

static const struct address_space_operations def_blk_aops = {
	.readpage	= blkdev_readpage,
	.readpages	= blkdev_readpages,
	.writepage	= blkdev_writepage,
	.write_begin	= blkdev_write_begin,
	.write_end	= blkdev_write_end,
	.writepages	= generic_writepages,
	.releasepage	= blkdev_releasepage,
	.direct_IO	= blkdev_direct_IO,
	.is_dirty_writeback = buffer_check_dirty_writeback,
};

const struct file_operations def_blk_fops = {
	.open		= blkdev_open,
	.release	= blkdev_close,
	.llseek		= block_llseek,
	.read		= new_sync_read,
	.write		= new_sync_write,
	.read_iter	= blkdev_read_iter,
	.write_iter	= blkdev_write_iter,
	.mmap		= generic_file_mmap,
	.fsync		= blkdev_fsync,
	.unlocked_ioctl	= block_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= compat_blkdev_ioctl,
#endif
	.splice_read	= generic_file_splice_read,
	.splice_write	= iter_file_splice_write,
};

int ioctl_by_bdev(struct block_device *bdev, unsigned cmd, unsigned long arg)
{
	int res;
	mm_segment_t old_fs = get_fs();
	set_fs(KERNEL_DS);
	res = blkdev_ioctl(bdev, 0, cmd, arg);
	set_fs(old_fs);
	return res;
}

EXPORT_SYMBOL(ioctl_by_bdev);
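
/*
 * Illustrative note (not part of the original file): the set_fs(KERNEL_DS)
 * window above exists so in-kernel callers may pass kernel pointers as
 * @arg where the ioctl would normally expect a userspace buffer.  For an
 * ioctl that ignores its argument it degenerates to, hypothetically,
 *
 *	ioctl_by_bdev(bdev, BLKRRPART, 0);
 *
 * i.e. a partition-table reread driven from another driver.
 */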

/**
 * lookup_bdev  - lookup a struct block_device by name
 * @pathname:	special file representing the block device
 *
 * Get a reference to the blockdevice at @pathname in the current
 * namespace if possible and return it.  Return ERR_PTR(error)
 * otherwise.
 */
struct block_device *lookup_bdev(const char *pathname)
{
	struct block_device *bdev;
	struct inode *inode;
	struct path path;
	int error;

	if (!pathname || !*pathname)
		return ERR_PTR(-EINVAL);

	error = kern_path(pathname, LOOKUP_FOLLOW, &path);
	if (error)
		return ERR_PTR(error);

	inode = path.dentry->d_inode;
	error = -ENOTBLK;
	if (!S_ISBLK(inode->i_mode))
		goto fail;
	error = -EACCES;
	if (path.mnt->mnt_flags & MNT_NODEV)
		goto fail;
	error = -ENOMEM;
	bdev = bd_acquire(inode);
	if (!bdev)
		goto fail;
out:
	path_put(&path);
	return bdev;
fail:
	bdev = ERR_PTR(error);
	goto out;
}
EXPORT_SYMBOL(lookup_bdev);
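
/*
 * Illustrative sketch (not part of the original file): resolving a path
 * to its dev_t without opening the device:
 *
 *	struct block_device *bdev = lookup_bdev("/dev/sda1");
 *	if (IS_ERR(bdev))
 *		return PTR_ERR(bdev);
 *	dev = bdev->bd_dev;
 *	bdput(bdev);
 *
 * The reference obtained here pins only the bdev pseudo-inode; it is
 * dropped with bdput(), not blkdev_put().
 */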

int __invalidate_device(struct block_device *bdev, bool kill_dirty)
{
	struct super_block *sb = get_super(bdev);
	int res = 0;

	if (sb) {
		/*
		 * no need to lock the super, get_super holds the
		 * read mutex so the filesystem cannot go away
		 * under us (->put_super runs with the write lock
		 * held).
		 */
		shrink_dcache_sb(sb);
		res = invalidate_inodes(sb, kill_dirty);
		drop_super(sb);
	}
	invalidate_bdev(bdev);
	return res;
}
EXPORT_SYMBOL(__invalidate_device);

void iterate_bdevs(void (*func)(struct block_device *, void *), void *arg)
{
	struct inode *inode, *old_inode = NULL;

	spin_lock(&inode_sb_list_lock);
	list_for_each_entry(inode, &blockdev_superblock->s_inodes, i_sb_list) {
		struct address_space *mapping = inode->i_mapping;

		spin_lock(&inode->i_lock);
		if (inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW) ||
		    mapping->nrpages == 0) {
			spin_unlock(&inode->i_lock);
			continue;
		}
		__iget(inode);
		spin_unlock(&inode->i_lock);
		spin_unlock(&inode_sb_list_lock);
		/*
		 * We hold a reference to 'inode' so it couldn't have been
		 * removed from s_inodes list while we dropped the
		 * inode_sb_list_lock.  We cannot iput the inode now as we can
		 * be holding the last reference and we cannot iput it under
		 * inode_sb_list_lock.  So we keep the reference and iput it
		 * later.
		 */
		iput(old_inode);
		old_inode = inode;

		func(I_BDEV(inode), arg);

		spin_lock(&inode_sb_list_lock);
	}
	spin_unlock(&inode_sb_list_lock);
	iput(old_inode);
}