/*
 * linux/fs/ufs/inode.c
 *
 * Copyright (C) 1998
 * Daniel Pirkl <daniel.pirkl@email.cz>
 * Charles University, Faculty of Mathematics and Physics
 *
 * from
 *
 * linux/fs/ext2/inode.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 * from
 *
 * linux/fs/minix/inode.c
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 *
 * Goal-directed block allocation by Stephen Tweedie (sct@dcs.ed.ac.uk), 1993
 * Big-endian to little-endian byte-swapping/bitmaps by
 * David S. Miller (davem@caip.rutgers.edu), 1995
 */

#include <asm/uaccess.h>
#include <asm/system.h>

#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/time.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/smp_lock.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/quotaops.h>

#include "ufs_fs.h"
#include "ufs.h"
#include "swab.h"
#include "util.h"

static u64 ufs_frag_map(struct inode *inode, sector_t frag);

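/*
 * ufs_block_to_path() maps a logical block number onto a chain of up to
 * four array indices: a direct slot, or an indirect/double/triple indirect
 * block followed by the offsets within each indirection level.  It returns
 * the depth of the chain (0 on failure), mirroring the scheme this file
 * inherits from ext2.
 */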
static int ufs_block_to_path(struct inode *inode, sector_t i_block, sector_t offsets[4])
{
	struct ufs_sb_private_info *uspi = UFS_SB(inode->i_sb)->s_uspi;
	int ptrs = uspi->s_apb;
	int ptrs_bits = uspi->s_apbshift;
	const long direct_blocks = UFS_NDADDR,
		indirect_blocks = ptrs,
		double_blocks = (1 << (ptrs_bits * 2));
	int n = 0;


	UFSD("ptrs=uspi->s_apb = %d,double_blocks=%ld \n",ptrs,double_blocks);
	if (i_block < direct_blocks) {
		offsets[n++] = i_block;
	} else if ((i_block -= direct_blocks) < indirect_blocks) {
		offsets[n++] = UFS_IND_BLOCK;
		offsets[n++] = i_block;
	} else if ((i_block -= indirect_blocks) < double_blocks) {
		offsets[n++] = UFS_DIND_BLOCK;
		offsets[n++] = i_block >> ptrs_bits;
		offsets[n++] = i_block & (ptrs - 1);
	} else if (((i_block -= double_blocks) >> (ptrs_bits * 2)) < ptrs) {
		offsets[n++] = UFS_TIND_BLOCK;
		offsets[n++] = i_block >> (ptrs_bits * 2);
		offsets[n++] = (i_block >> ptrs_bits) & (ptrs - 1);
		offsets[n++] = i_block & (ptrs - 1);
	} else {
		ufs_warning(inode->i_sb, "ufs_block_to_path", "block > big");
	}
	return n;
}

/*
 * Returns the location of the fragment from
 * the beginning of the filesystem.
 */

static u64 ufs_frag_map(struct inode *inode, sector_t frag)
{
	struct ufs_inode_info *ufsi = UFS_I(inode);
	struct super_block *sb = inode->i_sb;
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	u64 mask = (u64) uspi->s_apbmask>>uspi->s_fpbshift;
	int shift = uspi->s_apbshift-uspi->s_fpbshift;
	sector_t offsets[4], *p;
	int depth = ufs_block_to_path(inode, frag >> uspi->s_fpbshift, offsets);
	u64 ret = 0L;
	__fs32 block;
	__fs64 u2_block = 0L;
	unsigned flags = UFS_SB(sb)->s_flags;
	u64 temp = 0L;

	UFSD(": frag = %llu depth = %d\n", (unsigned long long)frag, depth);
	UFSD(": uspi->s_fpbshift = %d ,uspi->s_apbmask = %x, mask=%llx\n",
	     uspi->s_fpbshift, uspi->s_apbmask,
	     (unsigned long long)mask);

	if (depth == 0)
		return 0;

	p = offsets;

	lock_kernel();
	if ((flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2)
		goto ufs2;

	block = ufsi->i_u1.i_data[*p++];
	if (!block)
		goto out;
	while (--depth) {
		struct buffer_head *bh;
		sector_t n = *p++;

		bh = sb_bread(sb, uspi->s_sbbase + fs32_to_cpu(sb, block)+(n>>shift));
		if (!bh)
			goto out;
		block = ((__fs32 *) bh->b_data)[n & mask];
		brelse (bh);
		if (!block)
			goto out;
	}
	ret = (u64) (uspi->s_sbbase + fs32_to_cpu(sb, block) + (frag & uspi->s_fpbmask));
	goto out;
ufs2:
	u2_block = ufsi->i_u1.u2_i_data[*p++];
	if (!u2_block)
		goto out;


	while (--depth) {
		struct buffer_head *bh;
		sector_t n = *p++;


		temp = (u64)(uspi->s_sbbase) + fs64_to_cpu(sb, u2_block);
		bh = sb_bread(sb, temp +(u64) (n>>shift));
		if (!bh)
			goto out;
		u2_block = ((__fs64 *)bh->b_data)[n & mask];
		brelse(bh);
		if (!u2_block)
			goto out;
	}
	temp = (u64)uspi->s_sbbase + fs64_to_cpu(sb, u2_block);
	ret = temp + (u64) (frag & uspi->s_fpbmask);

out:
	unlock_kernel();
	return ret;
}

/**
 * ufs_inode_getfrag() - allocate new fragment(s)
 * @inode - pointer to inode
 * @fragment - number of the `fragment' which holds the pointer
 *   to the newly allocated fragment(s)
 * @new_fragment - number of the newly allocated fragment(s)
 * @required - how many fragment(s) we require
 * @err - set if something goes wrong
 * @phys - pointer to where we save the physical number of the newly allocated
 *   fragments, NULL if we are not allocating data (indirect blocks, for example)
 * @new - set if we allocate a new block
 * @locked_page - for ufs_new_fragments()
 */
static struct buffer_head *
ufs_inode_getfrag(struct inode *inode, u64 fragment,
		  sector_t new_fragment, unsigned int required, int *err,
		  long *phys, int *new, struct page *locked_page)
{
	struct ufs_inode_info *ufsi = UFS_I(inode);
	struct super_block *sb = inode->i_sb;
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	struct buffer_head * result;
	unsigned blockoff, lastblockoff;
	u64 tmp, goal, lastfrag, block, lastblock;
	void *p, *p2;

	UFSD("ENTER, ino %lu, fragment %llu, new_fragment %llu, required %u, "
	     "metadata %d\n", inode->i_ino, (unsigned long long)fragment,
	     (unsigned long long)new_fragment, required, !phys);

	/* TODO : to be done for write support
	if ( (flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2)
		goto ufs2;
	 */

	block = ufs_fragstoblks (fragment);
	blockoff = ufs_fragnum (fragment);
	p = ufs_get_direct_data_ptr(uspi, ufsi, block);

	goal = 0;

repeat:
	tmp = ufs_data_ptr_to_cpu(sb, p);

	lastfrag = ufsi->i_lastfrag;
	if (tmp && fragment < lastfrag) {
		if (!phys) {
			result = sb_getblk(sb, uspi->s_sbbase + tmp + blockoff);
			if (tmp == ufs_data_ptr_to_cpu(sb, p)) {
				UFSD("EXIT, result %llu\n",
				     (unsigned long long)tmp + blockoff);
				return result;
			}
			brelse (result);
			goto repeat;
		} else {
			*phys = uspi->s_sbbase + tmp + blockoff;
			return NULL;
		}
	}

	lastblock = ufs_fragstoblks (lastfrag);
	lastblockoff = ufs_fragnum (lastfrag);
	/*
	 * We will extend file into new block beyond last allocated block
	 */
	if (lastblock < block) {
		/*
		 * We must reallocate last allocated block
		 */
		if (lastblockoff) {
			p2 = ufs_get_direct_data_ptr(uspi, ufsi, lastblock);
			tmp = ufs_new_fragments(inode, p2, lastfrag,
						ufs_data_ptr_to_cpu(sb, p2),
						uspi->s_fpb - lastblockoff,
						err, locked_page);
			if (!tmp) {
				if (lastfrag != ufsi->i_lastfrag)
					goto repeat;
				else
					return NULL;
			}
			lastfrag = ufsi->i_lastfrag;

		}
		tmp = ufs_data_ptr_to_cpu(sb,
					  ufs_get_direct_data_ptr(uspi, ufsi,
								  lastblock));
		if (tmp)
			goal = tmp + uspi->s_fpb;
		tmp = ufs_new_fragments (inode, p, fragment - blockoff,
					 goal, required + blockoff,
					 err,
					 phys != NULL ? locked_page : NULL);
	} else if (lastblock == block) {
		/*
		 * We will extend last allocated block
		 */
		tmp = ufs_new_fragments(inode, p, fragment -
					(blockoff - lastblockoff),
					ufs_data_ptr_to_cpu(sb, p),
					required + (blockoff - lastblockoff),
					err, phys != NULL ? locked_page : NULL);
	} else /* (lastblock > block) */ {
		/*
		 * We will allocate new block before last allocated block
		 */
		if (block) {
			tmp = ufs_data_ptr_to_cpu(sb,
						  ufs_get_direct_data_ptr(uspi, ufsi, block - 1));
			if (tmp)
				goal = tmp + uspi->s_fpb;
		}
		tmp = ufs_new_fragments(inode, p, fragment - blockoff,
					goal, uspi->s_fpb, err,
					phys != NULL ? locked_page : NULL);
	}
	if (!tmp) {
		if ((!blockoff && ufs_data_ptr_to_cpu(sb, p)) ||
		    (blockoff && lastfrag != ufsi->i_lastfrag))
			goto repeat;
		*err = -ENOSPC;
		return NULL;
	}

	if (!phys) {
		result = sb_getblk(sb, uspi->s_sbbase + tmp + blockoff);
	} else {
		*phys = uspi->s_sbbase + tmp + blockoff;
		result = NULL;
		*err = 0;
		*new = 1;
	}

	inode->i_ctime = CURRENT_TIME_SEC;
	if (IS_SYNC(inode))
		ufs_sync_inode (inode);
	mark_inode_dirty(inode);
	UFSD("EXIT, result %llu\n", (unsigned long long)tmp + blockoff);
	return result;

	/* This part : To be implemented ....
	   Required only for writing, not required for READ-ONLY.
ufs2:

	u2_block = ufs_fragstoblks(fragment);
	u2_blockoff = ufs_fragnum(fragment);
	p = ufsi->i_u1.u2_i_data + block;
	goal = 0;

repeat2:
	tmp = fs32_to_cpu(sb, *p);
	lastfrag = ufsi->i_lastfrag;

	*/
}

/**
 * ufs_inode_getblock() - allocate new block
 * @inode - pointer to inode
 * @bh - pointer to the block which holds the "pointer" to the newly allocated block
 * @fragment - number of the `fragment' which holds the pointer
 *   to the newly allocated block
 * @new_fragment - number of the newly allocated fragment
 *   (the block will hold this fragment and also uspi->s_fpb-1)
 * @err - see ufs_inode_getfrag()
 * @phys - see ufs_inode_getfrag()
 * @new - see ufs_inode_getfrag()
 * @locked_page - see ufs_inode_getfrag()
 */
static struct buffer_head *
ufs_inode_getblock(struct inode *inode, struct buffer_head *bh,
		   u64 fragment, sector_t new_fragment, int *err,
		   long *phys, int *new, struct page *locked_page)
{
	struct super_block *sb = inode->i_sb;
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	struct buffer_head * result;
	unsigned blockoff;
	u64 tmp, goal, block;
	void *p;

	block = ufs_fragstoblks (fragment);
	blockoff = ufs_fragnum (fragment);

	UFSD("ENTER, ino %lu, fragment %llu, new_fragment %llu, metadata %d\n",
	     inode->i_ino, (unsigned long long)fragment,
	     (unsigned long long)new_fragment, !phys);

	result = NULL;
	if (!bh)
		goto out;
	if (!buffer_uptodate(bh)) {
		ll_rw_block (READ, 1, &bh);
		wait_on_buffer (bh);
		if (!buffer_uptodate(bh))
			goto out;
	}
	if (uspi->fs_magic == UFS2_MAGIC)
		p = (__fs64 *)bh->b_data + block;
	else
		p = (__fs32 *)bh->b_data + block;
repeat:
	tmp = ufs_data_ptr_to_cpu(sb, p);
	if (tmp) {
		if (!phys) {
			result = sb_getblk(sb, uspi->s_sbbase + tmp + blockoff);
			if (tmp == ufs_data_ptr_to_cpu(sb, p))
				goto out;
			brelse (result);
			goto repeat;
		} else {
			*phys = uspi->s_sbbase + tmp + blockoff;
			goto out;
		}
	}

	if (block && (uspi->fs_magic == UFS2_MAGIC ?
		      (tmp = fs64_to_cpu(sb, ((__fs64 *)bh->b_data)[block-1])) :
		      (tmp = fs32_to_cpu(sb, ((__fs32 *)bh->b_data)[block-1]))))
		goal = tmp + uspi->s_fpb;
	else
		goal = bh->b_blocknr + uspi->s_fpb;
	tmp = ufs_new_fragments(inode, p, ufs_blknum(new_fragment), goal,
				uspi->s_fpb, err, locked_page);
	if (!tmp) {
		if (ufs_data_ptr_to_cpu(sb, p))
			goto repeat;
		goto out;
	}


	if (!phys) {
		result = sb_getblk(sb, uspi->s_sbbase + tmp + blockoff);
	} else {
		*phys = uspi->s_sbbase + tmp + blockoff;
		*new = 1;
	}

	mark_buffer_dirty(bh);
	if (IS_SYNC(inode))
		sync_dirty_buffer(bh);
	inode->i_ctime = CURRENT_TIME_SEC;
	mark_inode_dirty(inode);
	UFSD("result %llu\n", (unsigned long long)tmp + blockoff);
out:
	brelse (bh);
	UFSD("EXIT\n");
	return result;
}

/**
 * ufs_getfrag_block() - `get_block_t' function, interface between UFS and
 * readpage, writepage, and so on
 */

int ufs_getfrag_block(struct inode *inode, sector_t fragment, struct buffer_head *bh_result, int create)
{
	struct super_block * sb = inode->i_sb;
	struct ufs_sb_private_info * uspi = UFS_SB(sb)->s_uspi;
	struct buffer_head * bh;
	int ret, err, new;
	unsigned long ptr,phys;
	u64 phys64 = 0;

	if (!create) {
		phys64 = ufs_frag_map(inode, fragment);
		UFSD("phys64 = %llu\n", (unsigned long long)phys64);
		if (phys64)
			map_bh(bh_result, sb, phys64);
		return 0;
	}

	/* This code entered only while writing ....? */

	err = -EIO;
	new = 0;
	ret = 0;
	bh = NULL;

	lock_kernel();

	UFSD("ENTER, ino %lu, fragment %llu\n", inode->i_ino, (unsigned long long)fragment);
	if (fragment >
	    ((UFS_NDADDR + uspi->s_apb + uspi->s_2apb + uspi->s_3apb)
	     << uspi->s_fpbshift))
		goto abort_too_big;

	err = 0;
	ptr = fragment;

	/*
	 * ok, these macros clean the logic up a bit and make
	 * it much more readable:
	 */
#define GET_INODE_DATABLOCK(x) \
	ufs_inode_getfrag(inode, x, fragment, 1, &err, &phys, &new,\
			  bh_result->b_page)
#define GET_INODE_PTR(x) \
	ufs_inode_getfrag(inode, x, fragment, uspi->s_fpb, &err, NULL, NULL,\
			  bh_result->b_page)
#define GET_INDIRECT_DATABLOCK(x) \
	ufs_inode_getblock(inode, bh, x, fragment,	\
			   &err, &phys, &new, bh_result->b_page)
#define GET_INDIRECT_PTR(x) \
	ufs_inode_getblock(inode, bh, x, fragment,	\
			   &err, NULL, NULL, NULL)

	if (ptr < UFS_NDIR_FRAGMENT) {
		bh = GET_INODE_DATABLOCK(ptr);
		goto out;
	}
	ptr -= UFS_NDIR_FRAGMENT;
	if (ptr < (1 << (uspi->s_apbshift + uspi->s_fpbshift))) {
		bh = GET_INODE_PTR(UFS_IND_FRAGMENT + (ptr >> uspi->s_apbshift));
		goto get_indirect;
	}
	ptr -= 1 << (uspi->s_apbshift + uspi->s_fpbshift);
	if (ptr < (1 << (uspi->s_2apbshift + uspi->s_fpbshift))) {
		bh = GET_INODE_PTR(UFS_DIND_FRAGMENT + (ptr >> uspi->s_2apbshift));
		goto get_double;
	}
	ptr -= 1 << (uspi->s_2apbshift + uspi->s_fpbshift);
	bh = GET_INODE_PTR(UFS_TIND_FRAGMENT + (ptr >> uspi->s_3apbshift));
	bh = GET_INDIRECT_PTR((ptr >> uspi->s_2apbshift) & uspi->s_apbmask);
get_double:
	bh = GET_INDIRECT_PTR((ptr >> uspi->s_apbshift) & uspi->s_apbmask);
get_indirect:
	bh = GET_INDIRECT_DATABLOCK(ptr & uspi->s_apbmask);

#undef GET_INODE_DATABLOCK
#undef GET_INODE_PTR
#undef GET_INDIRECT_DATABLOCK
#undef GET_INDIRECT_PTR

out:
	if (err)
		goto abort;
	if (new)
		set_buffer_new(bh_result);
	map_bh(bh_result, sb, phys);
abort:
	unlock_kernel();
	return err;

abort_too_big:
	ufs_warning(sb, "ufs_get_block", "block > big");
	goto abort;
}

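/*
 * ufs_getfrag() resolves a single fragment through ufs_getfrag_block()
 * using an on-stack dummy buffer_head, then returns the real buffer for
 * the mapped block.  Freshly allocated blocks are zeroed and marked dirty
 * so stale disk contents never reach the caller.
 */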
static struct buffer_head *ufs_getfrag(struct inode *inode,
				       unsigned int fragment,
				       int create, int *err)
{
	struct buffer_head dummy;
	int error;

	dummy.b_state = 0;
	dummy.b_blocknr = -1000;
	error = ufs_getfrag_block(inode, fragment, &dummy, create);
	*err = error;
	if (!error && buffer_mapped(&dummy)) {
		struct buffer_head *bh;
		bh = sb_getblk(inode->i_sb, dummy.b_blocknr);
		if (buffer_new(&dummy)) {
			memset(bh->b_data, 0, inode->i_sb->s_blocksize);
			set_buffer_uptodate(bh);
			mark_buffer_dirty(bh);
		}
		return bh;
	}
	return NULL;
}

struct buffer_head * ufs_bread (struct inode * inode, unsigned fragment,
				int create, int * err)
{
	struct buffer_head * bh;

	UFSD("ENTER, ino %lu, fragment %u\n", inode->i_ino, fragment);
	bh = ufs_getfrag (inode, fragment, create, err);
	if (!bh || buffer_uptodate(bh))
		return bh;
	ll_rw_block (READ, 1, &bh);
	wait_on_buffer (bh);
	if (buffer_uptodate(bh))
		return bh;
	brelse (bh);
	*err = -EIO;
	return NULL;
}

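/*
 * The address_space operations below are thin wrappers that plug
 * ufs_getfrag_block() into the generic buffer-cache helpers
 * (block_read_full_page, block_write_full_page, block_write_begin,
 * generic_block_bmap), so the usual page cache paths do the real work.
 */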
static int ufs_writepage(struct page *page, struct writeback_control *wbc)
{
	return block_write_full_page(page,ufs_getfrag_block,wbc);
}

static int ufs_readpage(struct file *file, struct page *page)
{
	return block_read_full_page(page,ufs_getfrag_block);
}

int __ufs_write_begin(struct file *file, struct address_space *mapping,
		      loff_t pos, unsigned len, unsigned flags,
		      struct page **pagep, void **fsdata)
{
	return block_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
				 ufs_getfrag_block);
}

static int ufs_write_begin(struct file *file, struct address_space *mapping,
			   loff_t pos, unsigned len, unsigned flags,
			   struct page **pagep, void **fsdata)
{
	*pagep = NULL;
	return __ufs_write_begin(file, mapping, pos, len, flags, pagep, fsdata);
}

static sector_t ufs_bmap(struct address_space *mapping, sector_t block)
{
	return generic_block_bmap(mapping,block,ufs_getfrag_block);
}

const struct address_space_operations ufs_aops = {
	.readpage = ufs_readpage,
	.writepage = ufs_writepage,
	.sync_page = block_sync_page,
	.write_begin = ufs_write_begin,
	.write_end = generic_write_end,
	.bmap = ufs_bmap
};

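/*
 * ufs_set_inode_ops() picks the inode, file, and address-space operations
 * from the file type: regular files and directories use ufs_aops, symlinks
 * short enough to live in the inode use the "fast" symlink operations, and
 * everything else is handed to init_special_inode().
 */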
static void ufs_set_inode_ops(struct inode *inode)
{
	if (S_ISREG(inode->i_mode)) {
		inode->i_op = &ufs_file_inode_operations;
		inode->i_fop = &ufs_file_operations;
		inode->i_mapping->a_ops = &ufs_aops;
	} else if (S_ISDIR(inode->i_mode)) {
		inode->i_op = &ufs_dir_inode_operations;
		inode->i_fop = &ufs_dir_operations;
		inode->i_mapping->a_ops = &ufs_aops;
	} else if (S_ISLNK(inode->i_mode)) {
		if (!inode->i_blocks)
			inode->i_op = &ufs_fast_symlink_inode_operations;
		else {
			inode->i_op = &page_symlink_inode_operations;
			inode->i_mapping->a_ops = &ufs_aops;
		}
	} else
		init_special_inode(inode, inode->i_mode,
				   ufs_get_inode_dev(inode->i_sb, UFS_I(inode)));
}

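/*
 * ufs1_read_inode() and ufs2_read_inode() copy one on-disk inode (UFS1 and
 * UFS2 layout respectively) into the in-core VFS inode, byte-swapping
 * fields as needed; both return -1 when the on-disk inode is unusable
 * (zero link count).
 */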
static int ufs1_read_inode(struct inode *inode, struct ufs_inode *ufs_inode)
{
	struct ufs_inode_info *ufsi = UFS_I(inode);
	struct super_block *sb = inode->i_sb;
	mode_t mode;

	/*
	 * Copy data to the in-core inode.
	 */
	inode->i_mode = mode = fs16_to_cpu(sb, ufs_inode->ui_mode);
	inode->i_nlink = fs16_to_cpu(sb, ufs_inode->ui_nlink);
	if (inode->i_nlink == 0) {
		ufs_error (sb, "ufs_read_inode", "inode %lu has zero nlink\n", inode->i_ino);
		return -1;
	}

	/*
	 * Linux now has 32-bit uid and gid, so we can support EFT.
	 */
	inode->i_uid = ufs_get_inode_uid(sb, ufs_inode);
	inode->i_gid = ufs_get_inode_gid(sb, ufs_inode);

	inode->i_size = fs64_to_cpu(sb, ufs_inode->ui_size);
	inode->i_atime.tv_sec = fs32_to_cpu(sb, ufs_inode->ui_atime.tv_sec);
	inode->i_ctime.tv_sec = fs32_to_cpu(sb, ufs_inode->ui_ctime.tv_sec);
	inode->i_mtime.tv_sec = fs32_to_cpu(sb, ufs_inode->ui_mtime.tv_sec);
	inode->i_mtime.tv_nsec = 0;
	inode->i_atime.tv_nsec = 0;
	inode->i_ctime.tv_nsec = 0;
	inode->i_blocks = fs32_to_cpu(sb, ufs_inode->ui_blocks);
	inode->i_generation = fs32_to_cpu(sb, ufs_inode->ui_gen);
	ufsi->i_flags = fs32_to_cpu(sb, ufs_inode->ui_flags);
	ufsi->i_shadow = fs32_to_cpu(sb, ufs_inode->ui_u3.ui_sun.ui_shadow);
	ufsi->i_oeftflag = fs32_to_cpu(sb, ufs_inode->ui_u3.ui_sun.ui_oeftflag);


	if (S_ISCHR(mode) || S_ISBLK(mode) || inode->i_blocks) {
		memcpy(ufsi->i_u1.i_data, &ufs_inode->ui_u2.ui_addr,
		       sizeof(ufs_inode->ui_u2.ui_addr));
	} else {
		memcpy(ufsi->i_u1.i_symlink, ufs_inode->ui_u2.ui_symlink,
		       sizeof(ufs_inode->ui_u2.ui_symlink) - 1);
		ufsi->i_u1.i_symlink[sizeof(ufs_inode->ui_u2.ui_symlink) - 1] = 0;
	}
	return 0;
}

static int ufs2_read_inode(struct inode *inode, struct ufs2_inode *ufs2_inode)
{
	struct ufs_inode_info *ufsi = UFS_I(inode);
	struct super_block *sb = inode->i_sb;
	mode_t mode;

	UFSD("Reading ufs2 inode, ino %lu\n", inode->i_ino);
	/*
	 * Copy data to the in-core inode.
	 */
	inode->i_mode = mode = fs16_to_cpu(sb, ufs2_inode->ui_mode);
	inode->i_nlink = fs16_to_cpu(sb, ufs2_inode->ui_nlink);
	if (inode->i_nlink == 0) {
		ufs_error (sb, "ufs_read_inode", "inode %lu has zero nlink\n", inode->i_ino);
		return -1;
	}

	/*
	 * Linux now has 32-bit uid and gid, so we can support EFT.
	 */
	inode->i_uid = fs32_to_cpu(sb, ufs2_inode->ui_uid);
	inode->i_gid = fs32_to_cpu(sb, ufs2_inode->ui_gid);

	inode->i_size = fs64_to_cpu(sb, ufs2_inode->ui_size);
	inode->i_atime.tv_sec = fs64_to_cpu(sb, ufs2_inode->ui_atime);
	inode->i_ctime.tv_sec = fs64_to_cpu(sb, ufs2_inode->ui_ctime);
	inode->i_mtime.tv_sec = fs64_to_cpu(sb, ufs2_inode->ui_mtime);
	inode->i_atime.tv_nsec = fs32_to_cpu(sb, ufs2_inode->ui_atimensec);
	inode->i_ctime.tv_nsec = fs32_to_cpu(sb, ufs2_inode->ui_ctimensec);
	inode->i_mtime.tv_nsec = fs32_to_cpu(sb, ufs2_inode->ui_mtimensec);
	inode->i_blocks = fs64_to_cpu(sb, ufs2_inode->ui_blocks);
	inode->i_generation = fs32_to_cpu(sb, ufs2_inode->ui_gen);
	ufsi->i_flags = fs32_to_cpu(sb, ufs2_inode->ui_flags);
	/*
	ufsi->i_shadow = fs32_to_cpu(sb, ufs_inode->ui_u3.ui_sun.ui_shadow);
	ufsi->i_oeftflag = fs32_to_cpu(sb, ufs_inode->ui_u3.ui_sun.ui_oeftflag);
	*/

	if (S_ISCHR(mode) || S_ISBLK(mode) || inode->i_blocks) {
		memcpy(ufsi->i_u1.u2_i_data, &ufs2_inode->ui_u2.ui_addr,
		       sizeof(ufs2_inode->ui_u2.ui_addr));
	} else {
		memcpy(ufsi->i_u1.i_symlink, ufs2_inode->ui_u2.ui_symlink,
		       sizeof(ufs2_inode->ui_u2.ui_symlink) - 1);
		ufsi->i_u1.i_symlink[sizeof(ufs2_inode->ui_u2.ui_symlink) - 1] = 0;
	}
	return 0;
}

struct inode *ufs_iget(struct super_block *sb, unsigned long ino)
{
	struct ufs_inode_info *ufsi;
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	struct buffer_head * bh;
	struct inode *inode;
	int err;

	UFSD("ENTER, ino %lu\n", ino);

	if (ino < UFS_ROOTINO || ino > (uspi->s_ncg * uspi->s_ipg)) {
		ufs_warning(sb, "ufs_read_inode", "bad inode number (%lu)\n",
			    ino);
		return ERR_PTR(-EIO);
	}

	inode = iget_locked(sb, ino);
	if (!inode)
		return ERR_PTR(-ENOMEM);
	if (!(inode->i_state & I_NEW))
		return inode;

	ufsi = UFS_I(inode);

	bh = sb_bread(sb, uspi->s_sbbase + ufs_inotofsba(inode->i_ino));
	if (!bh) {
		ufs_warning(sb, "ufs_read_inode", "unable to read inode %lu\n",
			    inode->i_ino);
		goto bad_inode;
	}
	if ((UFS_SB(sb)->s_flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2) {
		struct ufs2_inode *ufs2_inode = (struct ufs2_inode *)bh->b_data;

		err = ufs2_read_inode(inode,
				      ufs2_inode + ufs_inotofsbo(inode->i_ino));
	} else {
		struct ufs_inode *ufs_inode = (struct ufs_inode *)bh->b_data;

		err = ufs1_read_inode(inode,
				      ufs_inode + ufs_inotofsbo(inode->i_ino));
	}

	if (err)
		goto bad_inode;
	inode->i_version++;
	ufsi->i_lastfrag =
		(inode->i_size + uspi->s_fsize - 1) >> uspi->s_fshift;
	ufsi->i_dir_start_lookup = 0;
	ufsi->i_osync = 0;

	ufs_set_inode_ops(inode);

	brelse(bh);

	UFSD("EXIT\n");
	unlock_new_inode(inode);
	return inode;

bad_inode:
	iget_failed(inode);
	return ERR_PTR(-EIO);
}

static void ufs1_update_inode(struct inode *inode, struct ufs_inode *ufs_inode)
{
	struct super_block *sb = inode->i_sb;
	struct ufs_inode_info *ufsi = UFS_I(inode);

	ufs_inode->ui_mode = cpu_to_fs16(sb, inode->i_mode);
	ufs_inode->ui_nlink = cpu_to_fs16(sb, inode->i_nlink);

	ufs_set_inode_uid(sb, ufs_inode, inode->i_uid);
	ufs_set_inode_gid(sb, ufs_inode, inode->i_gid);

	ufs_inode->ui_size = cpu_to_fs64(sb, inode->i_size);
	ufs_inode->ui_atime.tv_sec = cpu_to_fs32(sb, inode->i_atime.tv_sec);
	ufs_inode->ui_atime.tv_usec = 0;
	ufs_inode->ui_ctime.tv_sec = cpu_to_fs32(sb, inode->i_ctime.tv_sec);
	ufs_inode->ui_ctime.tv_usec = 0;
	ufs_inode->ui_mtime.tv_sec = cpu_to_fs32(sb, inode->i_mtime.tv_sec);
	ufs_inode->ui_mtime.tv_usec = 0;
	ufs_inode->ui_blocks = cpu_to_fs32(sb, inode->i_blocks);
	ufs_inode->ui_flags = cpu_to_fs32(sb, ufsi->i_flags);
	ufs_inode->ui_gen = cpu_to_fs32(sb, inode->i_generation);

	if ((UFS_SB(sb)->s_flags & UFS_UID_MASK) == UFS_UID_EFT) {
		ufs_inode->ui_u3.ui_sun.ui_shadow = cpu_to_fs32(sb, ufsi->i_shadow);
		ufs_inode->ui_u3.ui_sun.ui_oeftflag = cpu_to_fs32(sb, ufsi->i_oeftflag);
	}

	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
		/* ufs_inode->ui_u2.ui_addr.ui_db[0] = cpu_to_fs32(sb, inode->i_rdev); */
		ufs_inode->ui_u2.ui_addr.ui_db[0] = ufsi->i_u1.i_data[0];
	} else if (inode->i_blocks) {
		memcpy(&ufs_inode->ui_u2.ui_addr, ufsi->i_u1.i_data,
		       sizeof(ufs_inode->ui_u2.ui_addr));
	}
	else {
		memcpy(&ufs_inode->ui_u2.ui_symlink, ufsi->i_u1.i_symlink,
		       sizeof(ufs_inode->ui_u2.ui_symlink));
	}

	if (!inode->i_nlink)
		memset (ufs_inode, 0, sizeof(struct ufs_inode));
}

static void ufs2_update_inode(struct inode *inode, struct ufs2_inode *ufs_inode)
{
	struct super_block *sb = inode->i_sb;
	struct ufs_inode_info *ufsi = UFS_I(inode);

	UFSD("ENTER\n");
	ufs_inode->ui_mode = cpu_to_fs16(sb, inode->i_mode);
	ufs_inode->ui_nlink = cpu_to_fs16(sb, inode->i_nlink);

	ufs_inode->ui_uid = cpu_to_fs32(sb, inode->i_uid);
	ufs_inode->ui_gid = cpu_to_fs32(sb, inode->i_gid);

	ufs_inode->ui_size = cpu_to_fs64(sb, inode->i_size);
	ufs_inode->ui_atime = cpu_to_fs64(sb, inode->i_atime.tv_sec);
	ufs_inode->ui_atimensec = cpu_to_fs32(sb, inode->i_atime.tv_nsec);
	ufs_inode->ui_ctime = cpu_to_fs64(sb, inode->i_ctime.tv_sec);
	ufs_inode->ui_ctimensec = cpu_to_fs32(sb, inode->i_ctime.tv_nsec);
	ufs_inode->ui_mtime = cpu_to_fs64(sb, inode->i_mtime.tv_sec);
	ufs_inode->ui_mtimensec = cpu_to_fs32(sb, inode->i_mtime.tv_nsec);

	ufs_inode->ui_blocks = cpu_to_fs64(sb, inode->i_blocks);
	ufs_inode->ui_flags = cpu_to_fs32(sb, ufsi->i_flags);
	ufs_inode->ui_gen = cpu_to_fs32(sb, inode->i_generation);

	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
		/* ufs_inode->ui_u2.ui_addr.ui_db[0] = cpu_to_fs32(sb, inode->i_rdev); */
		ufs_inode->ui_u2.ui_addr.ui_db[0] = ufsi->i_u1.u2_i_data[0];
	} else if (inode->i_blocks) {
		memcpy(&ufs_inode->ui_u2.ui_addr, ufsi->i_u1.u2_i_data,
		       sizeof(ufs_inode->ui_u2.ui_addr));
	} else {
		memcpy(&ufs_inode->ui_u2.ui_symlink, ufsi->i_u1.i_symlink,
		       sizeof(ufs_inode->ui_u2.ui_symlink));
	}

	if (!inode->i_nlink)
		memset (ufs_inode, 0, sizeof(struct ufs2_inode));
	UFSD("EXIT\n");
}

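/*
 * ufs_update_inode() writes the in-core inode back to its on-disk slot,
 * choosing the UFS1 or UFS2 layout from the superblock magic; with do_sync
 * set, the buffer is written out synchronously before returning.
 */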
static int ufs_update_inode(struct inode * inode, int do_sync)
{
	struct super_block *sb = inode->i_sb;
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	struct buffer_head * bh;

	UFSD("ENTER, ino %lu\n", inode->i_ino);

	if (inode->i_ino < UFS_ROOTINO ||
	    inode->i_ino > (uspi->s_ncg * uspi->s_ipg)) {
		ufs_warning (sb, "ufs_read_inode", "bad inode number (%lu)\n", inode->i_ino);
		return -1;
	}

	bh = sb_bread(sb, ufs_inotofsba(inode->i_ino));
	if (!bh) {
		ufs_warning (sb, "ufs_read_inode", "unable to read inode %lu\n", inode->i_ino);
		return -1;
	}
	if (uspi->fs_magic == UFS2_MAGIC) {
		struct ufs2_inode *ufs2_inode = (struct ufs2_inode *)bh->b_data;

		ufs2_update_inode(inode,
				  ufs2_inode + ufs_inotofsbo(inode->i_ino));
	} else {
		struct ufs_inode *ufs_inode = (struct ufs_inode *) bh->b_data;

		ufs1_update_inode(inode, ufs_inode + ufs_inotofsbo(inode->i_ino));
	}

	mark_buffer_dirty(bh);
	if (do_sync)
		sync_dirty_buffer(bh);
	brelse (bh);

	UFSD("EXIT\n");
	return 0;
}

int ufs_write_inode(struct inode *inode, struct writeback_control *wbc)
{
	int ret;
	lock_kernel();
	ret = ufs_update_inode(inode, wbc->sync_mode == WB_SYNC_ALL);
	unlock_kernel();
	return ret;
}

int ufs_sync_inode (struct inode *inode)
{
	return ufs_update_inode (inode, 1);
}

void ufs_delete_inode (struct inode * inode)
{
	loff_t old_i_size;

	if (!is_bad_inode(inode))
		dquot_initialize(inode);

	truncate_inode_pages(&inode->i_data, 0);
	if (is_bad_inode(inode))
		goto no_delete;
	/*UFS_I(inode)->i_dtime = CURRENT_TIME;*/
	lock_kernel();
	mark_inode_dirty(inode);
	ufs_update_inode(inode, IS_SYNC(inode));
	old_i_size = inode->i_size;
	inode->i_size = 0;
	if (inode->i_blocks && ufs_truncate(inode, old_i_size))
		ufs_warning(inode->i_sb, __func__, "ufs_truncate failed\n");
	ufs_free_inode (inode);
	unlock_kernel();
	return;
no_delete:
	clear_inode(inode);	/* We must guarantee clearing of inode... */
}