Root/
1 | /* |
2 | * fs/libfs.c |
3 | * Library for filesystems writers. |
4 | */ |
5 | |
6 | #include <linux/module.h> |
7 | #include <linux/pagemap.h> |
8 | #include <linux/slab.h> |
9 | #include <linux/mount.h> |
10 | #include <linux/vfs.h> |
11 | #include <linux/mutex.h> |
12 | #include <linux/exportfs.h> |
13 | #include <linux/writeback.h> |
14 | #include <linux/buffer_head.h> |
15 | |
16 | #include <asm/uaccess.h> |
17 | |
18 | int simple_getattr(struct vfsmount *mnt, struct dentry *dentry, |
19 | struct kstat *stat) |
20 | { |
21 | struct inode *inode = dentry->d_inode; |
22 | generic_fillattr(inode, stat); |
23 | stat->blocks = inode->i_mapping->nrpages << (PAGE_CACHE_SHIFT - 9); |
24 | return 0; |
25 | } |
26 | |
27 | int simple_statfs(struct dentry *dentry, struct kstatfs *buf) |
28 | { |
29 | buf->f_type = dentry->d_sb->s_magic; |
30 | buf->f_bsize = PAGE_CACHE_SIZE; |
31 | buf->f_namelen = NAME_MAX; |
32 | return 0; |
33 | } |
34 | |
/*
 * Retaining negative dentries for an in-memory filesystem just wastes
 * memory and lookup time: arrange for them to be deleted immediately.
 */
static int simple_delete_dentry(struct dentry *dentry)
{
	/* returning 1 from d_delete tells the dcache to drop the dentry */
	return 1;
}
43 | |
/*
 * Lookup the data. This is trivial - if the dentry didn't already
 * exist, we know it is negative. Set d_op to delete negative dentries.
 */
struct dentry *simple_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd)
{
	static const struct dentry_operations simple_dentry_operations = {
		.d_delete = simple_delete_dentry,
	};

	if (dentry->d_name.len > NAME_MAX)
		return ERR_PTR(-ENAMETOOLONG);
	dentry->d_op = &simple_dentry_operations;
	/* instantiate as negative; returning NULL means "use this dentry" */
	d_add(dentry, NULL);
	return NULL;
}
60 | |
/*
 * simple_sync_file - no-op .fsync for filesystems with no backing store;
 * there is nothing to write back, so always report success.
 */
int simple_sync_file(struct file * file, struct dentry *dentry, int datasync)
{
	return 0;
}
65 | |
66 | int dcache_dir_open(struct inode *inode, struct file *file) |
67 | { |
68 | static struct qstr cursor_name = {.len = 1, .name = "."}; |
69 | |
70 | file->private_data = d_alloc(file->f_path.dentry, &cursor_name); |
71 | |
72 | return file->private_data ? 0 : -ENOMEM; |
73 | } |
74 | |
/* dcache_dir_close - release the cursor dentry allocated at open time. */
int dcache_dir_close(struct inode *inode, struct file *file)
{
	dput(file->private_data);
	return 0;
}
80 | |
/*
 * dcache_dir_lseek - llseek for dcache-backed directories.
 *
 * Positions 0 and 1 are "." and ".."; positions >= 2 index the live
 * children in d_subdirs.  Seeking re-parks the per-open cursor dentry
 * within the parent's child list so a later readdir resumes correctly.
 * Serialized against readdir by the directory's i_mutex.
 */
loff_t dcache_dir_lseek(struct file *file, loff_t offset, int origin)
{
	mutex_lock(&file->f_path.dentry->d_inode->i_mutex);
	switch (origin) {
	case 1:		/* SEEK_CUR */
		offset += file->f_pos;
		/* fallthrough */
	case 0:		/* SEEK_SET */
		if (offset >= 0)
			break;
		/* negative result: fall through to the error path */
	default:
		mutex_unlock(&file->f_path.dentry->d_inode->i_mutex);
		return -EINVAL;
	}
	if (offset != file->f_pos) {
		file->f_pos = offset;
		if (file->f_pos >= 2) {
			struct list_head *p;
			struct dentry *cursor = file->private_data;
			loff_t n = file->f_pos - 2;

			spin_lock(&dcache_lock);
			list_del(&cursor->d_u.d_child);
			p = file->f_path.dentry->d_subdirs.next;
			/* skip n children that readdir would report */
			while (n && p != &file->f_path.dentry->d_subdirs) {
				struct dentry *next;
				next = list_entry(p, struct dentry, d_u.d_child);
				/* only hashed, positive dentries count */
				if (!d_unhashed(next) && next->d_inode)
					n--;
				p = p->next;
			}
			/* park the cursor just before position p */
			list_add_tail(&cursor->d_u.d_child, p);
			spin_unlock(&dcache_lock);
		}
	}
	mutex_unlock(&file->f_path.dentry->d_inode->i_mutex);
	return offset;
}
118 | |
/* Relationship between i_mode and the DT_xxx types */
static inline unsigned char dt_type(struct inode *inode)
{
	/* bits 12..15 of i_mode are the S_IFMT nibble, which equals DT_* */
	return (inode->i_mode >> 12) & 15;
}
124 | |
/*
 * Directory is locked and all positive dentries in it are safe, since
 * for ramfs-type trees they can't go away without unlink() or rmdir(),
 * both impossible due to the lock on directory.
 */

int dcache_readdir(struct file * filp, void * dirent, filldir_t filldir)
{
	struct dentry *dentry = filp->f_path.dentry;
	struct dentry *cursor = filp->private_data;
	struct list_head *p, *q = &cursor->d_u.d_child;
	ino_t ino;
	int i = filp->f_pos;

	switch (i) {
		case 0:
			/* position 0: emit "." for this directory */
			ino = dentry->d_inode->i_ino;
			if (filldir(dirent, ".", 1, i, ino, DT_DIR) < 0)
				break;
			filp->f_pos++;
			i++;
			/* fallthrough */
		case 1:
			/* position 1: emit ".." for the parent */
			ino = parent_ino(dentry);
			if (filldir(dirent, "..", 2, i, ino, DT_DIR) < 0)
				break;
			filp->f_pos++;
			i++;
			/* fallthrough */
		default:
			spin_lock(&dcache_lock);
			/* fresh scan: start the cursor at the list head */
			if (filp->f_pos == 2)
				list_move(q, &dentry->d_subdirs);

			for (p=q->next; p != &dentry->d_subdirs; p=p->next) {
				struct dentry *next;
				next = list_entry(p, struct dentry, d_u.d_child);
				/* skip negatives and unhashed entries */
				if (d_unhashed(next) || !next->d_inode)
					continue;

				/* drop the lock: filldir may sleep */
				spin_unlock(&dcache_lock);
				if (filldir(dirent, next->d_name.name,
					    next->d_name.len, filp->f_pos,
					    next->d_inode->i_ino,
					    dt_type(next->d_inode)) < 0)
					return 0;
				spin_lock(&dcache_lock);
				/* next is still alive */
				list_move(q, p);
				p = q;
				filp->f_pos++;
			}
			spin_unlock(&dcache_lock);
	}
	return 0;
}
181 | |
/* generic_read_dir - .read for directories: plain read(2) is not allowed. */
ssize_t generic_read_dir(struct file *filp, char __user *buf, size_t siz, loff_t *ppos)
{
	return -EISDIR;
}
186 | |
/* Default file operations for dcache-backed directories. */
const struct file_operations simple_dir_operations = {
	.open		= dcache_dir_open,
	.release	= dcache_dir_close,
	.llseek		= dcache_dir_lseek,
	.read		= generic_read_dir,
	.readdir	= dcache_readdir,
	.fsync		= simple_sync_file,
};
195 | |
/* Default inode operations for in-memory directories: lookup only. */
const struct inode_operations simple_dir_inode_operations = {
	.lookup		= simple_lookup,
};
199 | |
/* Fallback super_operations used when a caller supplies none. */
static const struct super_operations simple_super_operations = {
	.statfs		= simple_statfs,
};
203 | |
/*
 * Common helper for pseudo-filesystems (sockfs, pipefs, bdev - stuff that
 * will never be mountable)
 */
int get_sb_pseudo(struct file_system_type *fs_type, char *name,
	const struct super_operations *ops, unsigned long magic,
	struct vfsmount *mnt)
{
	struct super_block *s = sget(fs_type, NULL, set_anon_super, NULL);
	struct dentry *dentry;
	struct inode *root;
	struct qstr d_name = {.name = name, .len = strlen(name)};

	if (IS_ERR(s))
		return PTR_ERR(s);

	/* MS_NOUSER keeps userspace from ever mounting this sb */
	s->s_flags = MS_NOUSER;
	s->s_maxbytes = MAX_LFS_FILESIZE;
	s->s_blocksize = PAGE_SIZE;
	s->s_blocksize_bits = PAGE_SHIFT;
	s->s_magic = magic;
	s->s_op = ops ? ops : &simple_super_operations;
	s->s_time_gran = 1;
	root = new_inode(s);
	if (!root)
		goto Enomem;
	/*
	 * since this is the first inode, make it number 1. New inodes created
	 * after this must take care not to collide with it (by passing
	 * max_reserved of 1 to iunique).
	 */
	root->i_ino = 1;
	root->i_mode = S_IFDIR | S_IRUSR | S_IWUSR;
	root->i_atime = root->i_mtime = root->i_ctime = CURRENT_TIME;
	/* hand-build the root dentry: it is its own parent */
	dentry = d_alloc(NULL, &d_name);
	if (!dentry) {
		iput(root);
		goto Enomem;
	}
	dentry->d_sb = s;
	dentry->d_parent = dentry;
	d_instantiate(dentry, root);
	s->s_root = dentry;
	s->s_flags |= MS_ACTIVE;
	simple_set_mnt(mnt, s);
	return 0;

Enomem:
	/* sget() returned the sb locked; this unlocks and destroys it */
	deactivate_locked_super(s);
	return -ENOMEM;
}
255 | |
/*
 * simple_link - .link for in-memory filesystems: bump the link and
 * reference counts on the existing inode and bind it to the new dentry.
 */
int simple_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry)
{
	struct inode *inode = old_dentry->d_inode;

	inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
	inc_nlink(inode);
	/* extra inode reference for the second dentry pointing at it */
	atomic_inc(&inode->i_count);
	dget(dentry);
	d_instantiate(dentry, inode);
	return 0;
}
267 | |
268 | static inline int simple_positive(struct dentry *dentry) |
269 | { |
270 | return dentry->d_inode && !d_unhashed(dentry); |
271 | } |
272 | |
/*
 * simple_empty - return 1 if the directory has no live (positive, hashed)
 * children, 0 otherwise.  Walks d_subdirs under dcache_lock.
 */
int simple_empty(struct dentry *dentry)
{
	struct dentry *child;
	int ret = 0;

	spin_lock(&dcache_lock);
	list_for_each_entry(child, &dentry->d_subdirs, d_u.d_child)
		if (simple_positive(child))
			goto out;
	ret = 1;
out:
	spin_unlock(&dcache_lock);
	return ret;
}
287 | |
/*
 * simple_unlink - .unlink helper: update times, drop one link on the
 * inode and release the dentry's reference.
 */
int simple_unlink(struct inode *dir, struct dentry *dentry)
{
	struct inode *inode = dentry->d_inode;

	inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
	drop_nlink(inode);
	dput(dentry);
	return 0;
}
297 | |
/*
 * simple_rmdir - .rmdir helper: refuse non-empty directories, then drop
 * the victim's "." link, unlink it, and drop the parent's ".." link.
 */
int simple_rmdir(struct inode *dir, struct dentry *dentry)
{
	if (!simple_empty(dentry))
		return -ENOTEMPTY;

	/* extra drop accounts for the directory's own "." entry */
	drop_nlink(dentry->d_inode);
	simple_unlink(dir, dentry);
	drop_nlink(dir);
	return 0;
}
308 | |
/*
 * simple_rename - .rename helper for in-memory filesystems.
 * Refuses to clobber a non-empty target; otherwise adjusts the parents'
 * link counts when a directory moves between them and updates times.
 */
int simple_rename(struct inode *old_dir, struct dentry *old_dentry,
		struct inode *new_dir, struct dentry *new_dentry)
{
	struct inode *inode = old_dentry->d_inode;
	int they_are_dirs = S_ISDIR(old_dentry->d_inode->i_mode);

	if (!simple_empty(new_dentry))
		return -ENOTEMPTY;

	if (new_dentry->d_inode) {
		/* target exists: unlink it; if dirs, old_dir loses ".." */
		simple_unlink(new_dir, new_dentry);
		if (they_are_dirs)
			drop_nlink(old_dir);
	} else if (they_are_dirs) {
		/* a directory's ".." moves from old_dir to new_dir */
		drop_nlink(old_dir);
		inc_nlink(new_dir);
	}

	old_dir->i_ctime = old_dir->i_mtime = new_dir->i_ctime =
		new_dir->i_mtime = inode->i_ctime = CURRENT_TIME;

	return 0;
}
332 | |
/*
 * simple_readpage - .readpage for filesystems with no backing store:
 * a page never written is all zeroes, so just clear it and mark it
 * up to date.
 */
int simple_readpage(struct file *file, struct page *page)
{
	clear_highpage(page);
	flush_dcache_page(page);
	SetPageUptodate(page);
	unlock_page(page);
	return 0;
}
341 | |
/*
 * simple_write_begin - .write_begin for non-block-device filesystems.
 * Grabs (and locks) the page cache page covering @pos and, for a partial
 * write into a not-up-to-date page, zeroes the parts the copy won't fill.
 */
int simple_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	struct page *page;
	pgoff_t index;

	index = pos >> PAGE_CACHE_SHIFT;

	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page)
		return -ENOMEM;

	*pagep = page;

	if (!PageUptodate(page) && (len != PAGE_CACHE_SIZE)) {
		unsigned from = pos & (PAGE_CACHE_SIZE - 1);

		/* zero everything outside [from, from+len) */
		zero_user_segments(page, 0, from, from + len, PAGE_CACHE_SIZE);
	}
	return 0;
}
364 | |
/**
 * simple_write_end - .write_end helper for non-block-device FSes
 * @file: See .write_end of address_space_operations
 * @mapping: "
 * @pos: "
 * @len: "
 * @copied: "
 * @page: "
 * @fsdata: "
 *
 * simple_write_end does the minimum needed for updating a page after writing is
 * done. It has the same API signature as the .write_end of
 * address_space_operations vector. So it can just be set onto .write_end for
 * FSes that don't need any other processing. i_mutex is assumed to be held.
 * Block based filesystems should use generic_write_end().
 * NOTE: Even though i_size might get updated by this function, mark_inode_dirty
 * is not called, so a filesystem that actually does store data in .write_inode
 * should extend on what's done here with a call to mark_inode_dirty() in the
 * case that i_size has changed.
 */
int simple_write_end(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	struct inode *inode = page->mapping->host;
	loff_t last_pos = pos + copied;

	/* zero the stale part of the page if we did a short copy */
	if (copied < len) {
		unsigned from = pos & (PAGE_CACHE_SIZE - 1);

		zero_user(page, from + copied, len - copied);
	}

	/* write_begin zeroed the rest, so the whole page is now valid */
	if (!PageUptodate(page))
		SetPageUptodate(page);
	/*
	 * No need to use i_size_read() here, the i_size
	 * cannot change under us because we hold the i_mutex.
	 */
	if (last_pos > inode->i_size)
		i_size_write(inode, last_pos);

	set_page_dirty(page);
	unlock_page(page);
	page_cache_release(page);

	return copied;
}
415 | |
416 | /* |
417 | * the inodes created here are not hashed. If you use iunique to generate |
418 | * unique inode values later for this filesystem, then you must take care |
419 | * to pass it an appropriate max_reserved value to avoid collisions. |
420 | */ |
421 | int simple_fill_super(struct super_block *s, int magic, struct tree_descr *files) |
422 | { |
423 | struct inode *inode; |
424 | struct dentry *root; |
425 | struct dentry *dentry; |
426 | int i; |
427 | |
428 | s->s_blocksize = PAGE_CACHE_SIZE; |
429 | s->s_blocksize_bits = PAGE_CACHE_SHIFT; |
430 | s->s_magic = magic; |
431 | s->s_op = &simple_super_operations; |
432 | s->s_time_gran = 1; |
433 | |
434 | inode = new_inode(s); |
435 | if (!inode) |
436 | return -ENOMEM; |
437 | /* |
438 | * because the root inode is 1, the files array must not contain an |
439 | * entry at index 1 |
440 | */ |
441 | inode->i_ino = 1; |
442 | inode->i_mode = S_IFDIR | 0755; |
443 | inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME; |
444 | inode->i_op = &simple_dir_inode_operations; |
445 | inode->i_fop = &simple_dir_operations; |
446 | inode->i_nlink = 2; |
447 | root = d_alloc_root(inode); |
448 | if (!root) { |
449 | iput(inode); |
450 | return -ENOMEM; |
451 | } |
452 | for (i = 0; !files->name || files->name[0]; i++, files++) { |
453 | if (!files->name) |
454 | continue; |
455 | |
456 | /* warn if it tries to conflict with the root inode */ |
457 | if (unlikely(i == 1)) |
458 | printk(KERN_WARNING "%s: %s passed in a files array" |
459 | "with an index of 1!\n", __func__, |
460 | s->s_type->name); |
461 | |
462 | dentry = d_alloc_name(root, files->name); |
463 | if (!dentry) |
464 | goto out; |
465 | inode = new_inode(s); |
466 | if (!inode) |
467 | goto out; |
468 | inode->i_mode = S_IFREG | files->mode; |
469 | inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME; |
470 | inode->i_fop = files->ops; |
471 | inode->i_ino = i; |
472 | d_add(dentry, inode); |
473 | } |
474 | s->s_root = root; |
475 | return 0; |
476 | out: |
477 | d_genocide(root); |
478 | dput(root); |
479 | return -ENOMEM; |
480 | } |
481 | |
/* Protects the (*mount, *count) pair shared by simple_pin_fs/release_fs. */
static DEFINE_SPINLOCK(pin_fs_lock);
483 | |
/*
 * simple_pin_fs - lazily mount @type once and take a reference on it.
 * The spinlock must be dropped around vfs_kern_mount() (it may sleep),
 * so the "*mount still NULL?" check is repeated after reacquiring it.
 */
int simple_pin_fs(struct file_system_type *type, struct vfsmount **mount, int *count)
{
	struct vfsmount *mnt = NULL;
	spin_lock(&pin_fs_lock);
	if (unlikely(!*mount)) {
		spin_unlock(&pin_fs_lock);
		mnt = vfs_kern_mount(type, 0, type->name, NULL);
		if (IS_ERR(mnt))
			return PTR_ERR(mnt);
		spin_lock(&pin_fs_lock);
		/* someone may have raced us and installed a mount already */
		if (!*mount)
			*mount = mnt;
	}
	mntget(*mount);
	++*count;
	spin_unlock(&pin_fs_lock);
	/*
	 * Drops our extra ref: either the one vfs_kern_mount() gave us
	 * (we won the race, mntget above re-took it) or the whole mount
	 * (we lost the race).  mntput(NULL) is a no-op.
	 */
	mntput(mnt);
	return 0;
}
503 | |
/*
 * simple_release_fs - drop one reference taken by simple_pin_fs; the
 * last dropper clears *mount so the next pin remounts from scratch.
 */
void simple_release_fs(struct vfsmount **mount, int *count)
{
	struct vfsmount *mnt;
	spin_lock(&pin_fs_lock);
	mnt = *mount;
	if (!--*count)
		*mount = NULL;
	spin_unlock(&pin_fs_lock);
	/* mntput outside the spinlock: it may sleep tearing down the sb */
	mntput(mnt);
}
514 | |
/**
 * simple_read_from_buffer - copy data from the buffer to user space
 * @to: the user space buffer to read to
 * @count: the maximum number of bytes to read
 * @ppos: the current position in the buffer
 * @from: the buffer to read from
 * @available: the size of the buffer
 *
 * The simple_read_from_buffer() function reads up to @count bytes from the
 * buffer @from at offset @ppos into the user space address starting at @to.
 *
 * On success, the number of bytes read is returned and the offset @ppos is
 * advanced by this number, or negative value is returned on error.
 **/
ssize_t simple_read_from_buffer(void __user *to, size_t count, loff_t *ppos,
				const void *from, size_t available)
{
	loff_t pos = *ppos;
	size_t ret;

	if (pos < 0)
		return -EINVAL;
	/* at or past EOF, or nothing requested */
	if (pos >= available || !count)
		return 0;
	if (count > available - pos)
		count = available - pos;
	/* copy_to_user returns the number of bytes NOT copied */
	ret = copy_to_user(to, from + pos, count);
	if (ret == count)
		return -EFAULT;
	/* partial copy still succeeds: report what actually made it */
	count -= ret;
	*ppos = pos + count;
	return count;
}
548 | |
549 | /** |
550 | * memory_read_from_buffer - copy data from the buffer |
551 | * @to: the kernel space buffer to read to |
552 | * @count: the maximum number of bytes to read |
553 | * @ppos: the current position in the buffer |
554 | * @from: the buffer to read from |
555 | * @available: the size of the buffer |
556 | * |
557 | * The memory_read_from_buffer() function reads up to @count bytes from the |
558 | * buffer @from at offset @ppos into the kernel space address starting at @to. |
559 | * |
560 | * On success, the number of bytes read is returned and the offset @ppos is |
561 | * advanced by this number, or negative value is returned on error. |
562 | **/ |
563 | ssize_t memory_read_from_buffer(void *to, size_t count, loff_t *ppos, |
564 | const void *from, size_t available) |
565 | { |
566 | loff_t pos = *ppos; |
567 | |
568 | if (pos < 0) |
569 | return -EINVAL; |
570 | if (pos >= available) |
571 | return 0; |
572 | if (count > available - pos) |
573 | count = available - pos; |
574 | memcpy(to, from + pos, count); |
575 | *ppos = pos + count; |
576 | |
577 | return count; |
578 | } |
579 | |
/*
 * Transaction based IO.
 * The file expects a single write which triggers the transaction, and then
 * possibly a read which collects the result - which is stored in a
 * file-local buffer.
 */

/*
 * simple_transaction_set - publish @n bytes of result in the per-file
 * transaction buffer; readers see size 0 until the data is in place.
 */
void simple_transaction_set(struct file *file, size_t n)
{
	struct simple_transaction_argresp *ar = file->private_data;

	BUG_ON(n > SIMPLE_TRANSACTION_LIMIT);

	/*
	 * The barrier ensures that ar->size will really remain zero until
	 * ar->data is ready for reading.
	 */
	smp_mb();
	ar->size = n;
}
600 | |
/*
 * simple_transaction_get - allocate the per-open transaction buffer and
 * copy the user's request into it.  Returns a pointer to the request
 * data, or an ERR_PTR on failure.  Only one write is allowed per open.
 */
char *simple_transaction_get(struct file *file, const char __user *buf, size_t size)
{
	struct simple_transaction_argresp *ar;
	static DEFINE_SPINLOCK(simple_transaction_lock);

	/* leave room for a terminating NUL in the page-sized buffer */
	if (size > SIMPLE_TRANSACTION_LIMIT - 1)
		return ERR_PTR(-EFBIG);

	ar = (struct simple_transaction_argresp *)get_zeroed_page(GFP_KERNEL);
	if (!ar)
		return ERR_PTR(-ENOMEM);

	spin_lock(&simple_transaction_lock);

	/* only one write allowed per open */
	if (file->private_data) {
		spin_unlock(&simple_transaction_lock);
		free_page((unsigned long)ar);
		return ERR_PTR(-EBUSY);
	}

	file->private_data = ar;

	spin_unlock(&simple_transaction_lock);

	/* copy outside the spinlock: copy_from_user may fault and sleep */
	if (copy_from_user(ar->data, buf, size))
		return ERR_PTR(-EFAULT);

	return ar->data;
}
631 | |
/*
 * simple_transaction_read - return the transaction result, if any.
 * Before a write has set up private_data there is nothing to read.
 */
ssize_t simple_transaction_read(struct file *file, char __user *buf, size_t size, loff_t *pos)
{
	struct simple_transaction_argresp *ar = file->private_data;

	if (!ar)
		return 0;
	return simple_read_from_buffer(buf, size, pos, ar->data, ar->size);
}
640 | |
/*
 * simple_transaction_release - free the transaction page on last close.
 * free_page on 0 (no write ever happened) is a no-op.
 */
int simple_transaction_release(struct inode *inode, struct file *file)
{
	free_page((unsigned long)file->private_data);
	return 0;
}
646 | |
/* Simple attribute files */

/* Per-open state for a simple u64 attribute file. */
struct simple_attr {
	int (*get)(void *, u64 *);	/* read the attribute value */
	int (*set)(void *, u64);	/* write the attribute value */
	char get_buf[24];	/* enough to store a u64 and "\n\0" */
	char set_buf[24];
	void *data;		/* opaque cookie from inode->i_private */
	const char *fmt;	/* format for read operation */
	struct mutex mutex;	/* protects access to these buffers */
};
658 | |
/* simple_attr_open is called by an actual attribute open file operation
 * to set the attribute specific access operations. */
int simple_attr_open(struct inode *inode, struct file *file,
		     int (*get)(void *, u64 *), int (*set)(void *, u64),
		     const char *fmt)
{
	struct simple_attr *attr;

	attr = kmalloc(sizeof(*attr), GFP_KERNEL);
	if (!attr)
		return -ENOMEM;

	attr->get = get;
	attr->set = set;
	/* the cookie the filesystem stashed when creating the file */
	attr->data = inode->i_private;
	attr->fmt = fmt;
	mutex_init(&attr->mutex);

	file->private_data = attr;

	/* attribute files don't support seeking */
	return nonseekable_open(inode, file);
}
681 | |
/* simple_attr_release - free the per-open state allocated in open. */
int simple_attr_release(struct inode *inode, struct file *file)
{
	kfree(file->private_data);
	return 0;
}
687 | |
/* read from the buffer that is filled with the get function */
ssize_t simple_attr_read(struct file *file, char __user *buf,
			 size_t len, loff_t *ppos)
{
	struct simple_attr *attr;
	size_t size;
	ssize_t ret;

	attr = file->private_data;

	/* write-only attribute */
	if (!attr->get)
		return -EACCES;

	ret = mutex_lock_interruptible(&attr->mutex);
	if (ret)
		return ret;

	if (*ppos) {		/* continued read */
		size = strlen(attr->get_buf);
	} else {		/* first read */
		u64 val;
		ret = attr->get(attr->data, &val);
		if (ret)
			goto out;

		/* format once into the per-open buffer; later reads reuse it */
		size = scnprintf(attr->get_buf, sizeof(attr->get_buf),
				 attr->fmt, (unsigned long long)val);
	}

	ret = simple_read_from_buffer(buf, len, ppos, attr->get_buf, size);
out:
	mutex_unlock(&attr->mutex);
	return ret;
}
722 | |
723 | /* interpret the buffer as a number to call the set function with */ |
724 | ssize_t simple_attr_write(struct file *file, const char __user *buf, |
725 | size_t len, loff_t *ppos) |
726 | { |
727 | struct simple_attr *attr; |
728 | u64 val; |
729 | size_t size; |
730 | ssize_t ret; |
731 | |
732 | attr = file->private_data; |
733 | if (!attr->set) |
734 | return -EACCES; |
735 | |
736 | ret = mutex_lock_interruptible(&attr->mutex); |
737 | if (ret) |
738 | return ret; |
739 | |
740 | ret = -EFAULT; |
741 | size = min(sizeof(attr->set_buf) - 1, len); |
742 | if (copy_from_user(attr->set_buf, buf, size)) |
743 | goto out; |
744 | |
745 | attr->set_buf[size] = '\0'; |
746 | val = simple_strtol(attr->set_buf, NULL, 0); |
747 | ret = attr->set(attr->data, val); |
748 | if (ret == 0) |
749 | ret = len; /* on success, claim we got the whole input */ |
750 | out: |
751 | mutex_unlock(&attr->mutex); |
752 | return ret; |
753 | } |
754 | |
/**
 * generic_fh_to_dentry - generic helper for the fh_to_dentry export operation
 * @sb: filesystem to do the file handle conversion on
 * @fid: file handle to convert
 * @fh_len: length of the file handle in bytes
 * @fh_type: type of file handle
 * @get_inode: filesystem callback to retrieve inode
 *
 * This function decodes @fid as long as it has one of the well-known
 * Linux filehandle types and calls @get_inode on it to retrieve the
 * inode for the object specified in the file handle.
 */
struct dentry *generic_fh_to_dentry(struct super_block *sb, struct fid *fid,
		int fh_len, int fh_type, struct inode *(*get_inode)
			(struct super_block *sb, u64 ino, u32 gen))
{
	struct inode *inode = NULL;

	/* need at least ino + generation (two 32-bit words) */
	if (fh_len < 2)
		return NULL;

	switch (fh_type) {
	case FILEID_INO32_GEN:
	case FILEID_INO32_GEN_PARENT:
		inode = get_inode(sb, fid->i32.ino, fid->i32.gen);
		break;
	}

	/* d_obtain_alias handles NULL and ERR_PTR inodes itself */
	return d_obtain_alias(inode);
}
EXPORT_SYMBOL_GPL(generic_fh_to_dentry);
786 | |
/**
 * generic_fh_to_parent - generic helper for the fh_to_parent export operation
 * @sb: filesystem to do the file handle conversion on
 * @fid: file handle to convert
 * @fh_len: length of the file handle in bytes
 * @fh_type: type of file handle
 * @get_inode: filesystem callback to retrieve inode
 *
 * This function decodes @fid as long as it has one of the well-known
 * Linux filehandle types and calls @get_inode on it to retrieve the
 * inode for the _parent_ object specified in the file handle if it
 * is specified in the file handle, or NULL otherwise.
 */
struct dentry *generic_fh_to_parent(struct super_block *sb, struct fid *fid,
		int fh_len, int fh_type, struct inode *(*get_inode)
			(struct super_block *sb, u64 ino, u32 gen))
{
	struct inode *inode = NULL;

	/* a parent reference needs more than the two-word self handle */
	if (fh_len <= 2)
		return NULL;

	switch (fh_type) {
	case FILEID_INO32_GEN_PARENT:
		/* parent generation is optional (only present if fh_len > 3) */
		inode = get_inode(sb, fid->i32.parent_ino,
				  (fh_len > 3 ? fid->i32.parent_gen : 0));
		break;
	}

	return d_obtain_alias(inode);
}
EXPORT_SYMBOL_GPL(generic_fh_to_parent);
819 | |
/*
 * simple_fsync - .fsync for simple block-backed filesystems.
 * Syncs the inode's metadata buffers, then writes back the inode itself
 * if it is dirty (or dirty in a datasync-relevant way when @datasync).
 */
int simple_fsync(struct file *file, struct dentry *dentry, int datasync)
{
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = 0, /* metadata-only; caller takes care of data */
	};
	struct inode *inode = dentry->d_inode;
	int err;
	int ret;

	ret = sync_mapping_buffers(inode->i_mapping);
	if (!(inode->i_state & I_DIRTY))
		return ret;
	/* datasync can skip writeback that only updates timestamps */
	if (datasync && !(inode->i_state & I_DIRTY_DATASYNC))
		return ret;

	/* report the first error, but attempt both operations */
	err = sync_inode(inode, &wbc);
	if (ret == 0)
		ret = err;
	return ret;
}
EXPORT_SYMBOL(simple_fsync);
842 | |
/* Export the library helpers above for use by filesystem modules. */
EXPORT_SYMBOL(dcache_dir_close);
EXPORT_SYMBOL(dcache_dir_lseek);
EXPORT_SYMBOL(dcache_dir_open);
EXPORT_SYMBOL(dcache_readdir);
EXPORT_SYMBOL(generic_read_dir);
EXPORT_SYMBOL(get_sb_pseudo);
EXPORT_SYMBOL(simple_write_begin);
EXPORT_SYMBOL(simple_write_end);
EXPORT_SYMBOL(simple_dir_inode_operations);
EXPORT_SYMBOL(simple_dir_operations);
EXPORT_SYMBOL(simple_empty);
EXPORT_SYMBOL(simple_fill_super);
EXPORT_SYMBOL(simple_getattr);
EXPORT_SYMBOL(simple_link);
EXPORT_SYMBOL(simple_lookup);
EXPORT_SYMBOL(simple_pin_fs);
EXPORT_SYMBOL(simple_readpage);
EXPORT_SYMBOL(simple_release_fs);
EXPORT_SYMBOL(simple_rename);
EXPORT_SYMBOL(simple_rmdir);
EXPORT_SYMBOL(simple_statfs);
EXPORT_SYMBOL(simple_sync_file);
EXPORT_SYMBOL(simple_unlink);
EXPORT_SYMBOL(simple_read_from_buffer);
EXPORT_SYMBOL(memory_read_from_buffer);
EXPORT_SYMBOL(simple_transaction_set);
EXPORT_SYMBOL(simple_transaction_get);
EXPORT_SYMBOL(simple_transaction_read);
EXPORT_SYMBOL(simple_transaction_release);
EXPORT_SYMBOL_GPL(simple_attr_open);
EXPORT_SYMBOL_GPL(simple_attr_release);
EXPORT_SYMBOL_GPL(simple_attr_read);
EXPORT_SYMBOL_GPL(simple_attr_write);
876 |
Branches:
ben-wpan
ben-wpan-stefan
javiroman/ks7010
jz-2.6.34
jz-2.6.34-rc5
jz-2.6.34-rc6
jz-2.6.34-rc7
jz-2.6.35
jz-2.6.36
jz-2.6.37
jz-2.6.38
jz-2.6.39
jz-3.0
jz-3.1
jz-3.11
jz-3.12
jz-3.13
jz-3.15
jz-3.16
jz-3.18-dt
jz-3.2
jz-3.3
jz-3.4
jz-3.5
jz-3.6
jz-3.6-rc2-pwm
jz-3.9
jz-3.9-clk
jz-3.9-rc8
jz47xx
jz47xx-2.6.38
master
Tags:
od-2011-09-04
od-2011-09-18
v2.6.34-rc5
v2.6.34-rc6
v2.6.34-rc7
v3.9