/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * dcache.c
 *
 * dentry cache handling code
 *
 * Copyright (C) 2002, 2004 Oracle. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/fs.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/namei.h>

#include <cluster/masklog.h>

#include "ocfs2.h"

#include "alloc.h"
#include "dcache.h"
#include "dlmglue.h"
#include "file.h"
#include "inode.h"
#include "super.h"
#include "ocfs2_trace.h"

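/*
 * Stash the parent directory's current lock generation in a negative
 * dentry's d_fsdata. ocfs2_dentry_revalidate() compares this value
 * against the parent's ip_dir_lock_gen later on; a mismatch means the
 * directory may have changed under us (another node could have created
 * the name in the meantime), so the cached negative dentry is thrown
 * away and a fresh lookup is forced.
 */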
void ocfs2_dentry_attach_gen(struct dentry *dentry)
{
        unsigned long gen =
                OCFS2_I(dentry->d_parent->d_inode)->ip_dir_lock_gen;
        BUG_ON(dentry->d_inode);
        dentry->d_fsdata = (void *)gen;
}


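/*
 * d_revalidate for ocfs2: return 1 if the dentry can still be trusted,
 * 0 to force the VFS to redo the lookup, or -ECHILD to punt an
 * RCU-walk lookup back to ref-walk mode. Negative dentries are checked
 * against the parent's lock generation (see ocfs2_dentry_attach_gen()
 * above); positive dentries are rejected once the inode has been
 * deleted, orphaned, or is missing its dentry lock.
 */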
static int ocfs2_dentry_revalidate(struct dentry *dentry,
                                   struct nameidata *nd)
{
        struct inode *inode;
        int ret = 0;    /* if all else fails, just return false */
        struct ocfs2_super *osb;

        if (nd && nd->flags & LOOKUP_RCU)
                return -ECHILD;

        inode = dentry->d_inode;
        osb = OCFS2_SB(dentry->d_sb);

        trace_ocfs2_dentry_revalidate(dentry, dentry->d_name.len,
                                      dentry->d_name.name);

        /*
         * For a negative dentry, check the generation number of the
         * parent directory and compare it with the one stashed in the
         * dentry's d_fsdata by ocfs2_dentry_attach_gen().
         */
        if (inode == NULL) {
                unsigned long gen = (unsigned long) dentry->d_fsdata;
                unsigned long pgen =
                        OCFS2_I(dentry->d_parent->d_inode)->ip_dir_lock_gen;

                trace_ocfs2_dentry_revalidate_negative(dentry->d_name.len,
                                                       dentry->d_name.name,
                                                       pgen, gen);
                if (gen != pgen)
                        goto bail;
                goto valid;
        }

        BUG_ON(!osb);

        if (inode == osb->root_inode || is_bad_inode(inode))
                goto bail;

        spin_lock(&OCFS2_I(inode)->ip_lock);
        /* did we or someone else delete this inode? */
        if (OCFS2_I(inode)->ip_flags & OCFS2_INODE_DELETED) {
                spin_unlock(&OCFS2_I(inode)->ip_lock);
                trace_ocfs2_dentry_revalidate_delete(
                                (unsigned long long)OCFS2_I(inode)->ip_blkno);
                goto bail;
        }
        spin_unlock(&OCFS2_I(inode)->ip_lock);

        /*
         * We don't need a cluster lock to test this because once an
         * inode nlink hits zero, it never goes back.
         */
        if (inode->i_nlink == 0) {
                trace_ocfs2_dentry_revalidate_orphaned(
                        (unsigned long long)OCFS2_I(inode)->ip_blkno,
                        S_ISDIR(inode->i_mode));
                goto bail;
        }

        /*
         * If the last lookup failed to create a dentry lock, redo it
         * now.
         */
        if (!dentry->d_fsdata) {
                trace_ocfs2_dentry_revalidate_nofsdata(
                                (unsigned long long)OCFS2_I(inode)->ip_blkno);
                goto bail;
        }

valid:
        ret = 1;

bail:
        trace_ocfs2_dentry_revalidate_ret(ret);
        return ret;
}

static int ocfs2_match_dentry(struct dentry *dentry,
                              u64 parent_blkno,
                              int skip_unhashed)
{
        struct inode *parent;

        /*
         * ocfs2_lookup() does a d_splice_alias() _before_ attaching
         * to the lock data, so we skip those here, otherwise
         * ocfs2_dentry_attach_lock() will get its original dentry
         * back.
         */
        if (!dentry->d_fsdata)
                return 0;

        if (!dentry->d_parent)
                return 0;

        if (skip_unhashed && d_unhashed(dentry))
                return 0;

        parent = dentry->d_parent->d_inode;
        /* Negative parent dentry? */
        if (!parent)
                return 0;

        /* Name is in a different directory. */
        if (OCFS2_I(parent)->ip_blkno != parent_blkno)
                return 0;

        return 1;
}

/*
 * Walk the inode alias list, and find a dentry which has a given
 * parent. ocfs2_dentry_attach_lock() wants to find _any_ alias as it
 * is looking for a dentry_lock reference. The downconvert thread is
 * looking to unhash aliases, so we allow it to skip any that already
 * have that property.
 */
struct dentry *ocfs2_find_local_alias(struct inode *inode,
                                      u64 parent_blkno,
                                      int skip_unhashed)
{
        struct list_head *p;
        struct dentry *dentry = NULL;

        spin_lock(&inode->i_lock);
        list_for_each(p, &inode->i_dentry) {
                dentry = list_entry(p, struct dentry, d_alias);

                spin_lock(&dentry->d_lock);
                if (ocfs2_match_dentry(dentry, parent_blkno, skip_unhashed)) {
                        trace_ocfs2_find_local_alias(dentry->d_name.len,
                                                     dentry->d_name.name);

                        dget_dlock(dentry);
                        spin_unlock(&dentry->d_lock);
                        break;
                }
                spin_unlock(&dentry->d_lock);

                dentry = NULL;
        }

        spin_unlock(&inode->i_lock);

        return dentry;
}

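/*
 * Serializes attaching and detaching of dentry->d_fsdata and updates
 * to dl_count; a dput() can prune a dentry asynchronously, so attach
 * and detach must not race (see the comment above
 * ocfs2_dentry_attach_lock() below).
 */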
DEFINE_SPINLOCK(dentry_attach_lock);

/*
 * Attach this dentry to a cluster lock.
 *
 * Dentry locks cover all links in a given directory to a particular
 * inode. We do this so that ocfs2 can build a lock name which all
 * nodes in the cluster can agree on at all times. Shoving full names
 * in the cluster lock won't work due to size restrictions. Covering
 * links inside of a directory is a good compromise because it still
 * allows us to use the parent directory lock to synchronize
 * operations.
 *
 * Call this function with the parent dir semaphore and the parent dir
 * cluster lock held.
 *
 * The dir semaphore will protect us from having to worry about
 * concurrent processes on our node trying to attach a lock at the
 * same time.
 *
 * The dir cluster lock (held at either PR or EX mode) protects us
 * from unlink and rename on other nodes.
 *
 * A dput() can happen asynchronously due to pruning, so we cover
 * attaching and detaching the dentry lock with a
 * dentry_attach_lock.
 *
 * A node which has done lookup on a name retains a protected read
 * lock until final dput. If the user requests an unlink or rename,
 * the protected read is upgraded to an exclusive lock. Other nodes
 * who have seen the dentry will then be informed that they need to
 * downgrade their lock, which will involve d_delete on the
 * dentry. This happens in ocfs2_dentry_convert_worker().
 */
int ocfs2_dentry_attach_lock(struct dentry *dentry,
                             struct inode *inode,
                             u64 parent_blkno)
{
        int ret;
        struct dentry *alias;
        struct ocfs2_dentry_lock *dl = dentry->d_fsdata;

        trace_ocfs2_dentry_attach_lock(dentry->d_name.len, dentry->d_name.name,
                                       (unsigned long long)parent_blkno, dl);

        /*
         * Negative dentry. We ignore these for now.
         *
         * XXX: Could we improve ocfs2_dentry_revalidate() by
         * tracking these?
         */
        if (!inode)
                return 0;

        if (!dentry->d_inode && dentry->d_fsdata) {
                /* We're converting a negative dentry to a positive
                 * one, so clear the stale dentry->d_fsdata. */
                dentry->d_fsdata = dl = NULL;
        }

        if (dl) {
                mlog_bug_on_msg(dl->dl_parent_blkno != parent_blkno,
                                " \"%.*s\": old parent: %llu, new: %llu\n",
                                dentry->d_name.len, dentry->d_name.name,
                                (unsigned long long)parent_blkno,
                                (unsigned long long)dl->dl_parent_blkno);
                return 0;
        }

        alias = ocfs2_find_local_alias(inode, parent_blkno, 0);
        if (alias) {
                /*
                 * Great, an alias exists, which means we must have a
                 * dentry lock already. We can just grab the lock off
                 * the alias and add it to the list.
                 *
                 * We're depending here on the fact that this dentry
                 * was found and exists in the dcache and so must have
                 * a reference to the dentry_lock because we can't
                 * race creates. Final dput() cannot happen on it
                 * since we have it pinned, so our reference is safe.
                 */
                dl = alias->d_fsdata;
                mlog_bug_on_msg(!dl, "parent %llu, ino %llu\n",
                                (unsigned long long)parent_blkno,
                                (unsigned long long)OCFS2_I(inode)->ip_blkno);

                mlog_bug_on_msg(dl->dl_parent_blkno != parent_blkno,
                                " \"%.*s\": old parent: %llu, new: %llu\n",
                                dentry->d_name.len, dentry->d_name.name,
                                (unsigned long long)parent_blkno,
                                (unsigned long long)dl->dl_parent_blkno);

                trace_ocfs2_dentry_attach_lock_found(dl->dl_lockres.l_name,
                                (unsigned long long)parent_blkno,
                                (unsigned long long)OCFS2_I(inode)->ip_blkno);

                goto out_attach;
        }

        /*
         * There are no other aliases
         */
        dl = kmalloc(sizeof(*dl), GFP_NOFS);
        if (!dl) {
                ret = -ENOMEM;
                mlog_errno(ret);
                return ret;
        }

        dl->dl_count = 0;
        /*
         * Does this have to happen below, for all attaches, in case
         * the struct inode gets blown away by the downconvert thread?
         */
        dl->dl_inode = igrab(inode);
        dl->dl_parent_blkno = parent_blkno;
        ocfs2_dentry_lock_res_init(dl, parent_blkno, inode);

out_attach:
        spin_lock(&dentry_attach_lock);
        dentry->d_fsdata = dl;
        dl->dl_count++;
        spin_unlock(&dentry_attach_lock);

        /*
         * This actually gets us our PRMODE level lock. From now on,
         * we'll have a notification if one of these names is
         * destroyed on another node.
         */
        ret = ocfs2_dentry_lock(dentry, 0);
        if (!ret)
                ocfs2_dentry_unlock(dentry, 0);
        else
                mlog_errno(ret);

        /*
         * In case of error, manually free the allocation and do the iput().
         * We need to do this because error here means no d_instantiate(),
         * which means iput() will not be called during dput(dentry).
         */
        if (ret < 0 && !alias) {
                ocfs2_lock_res_free(&dl->dl_lockres);
                BUG_ON(dl->dl_count != 1);
                spin_lock(&dentry_attach_lock);
                dentry->d_fsdata = NULL;
                spin_unlock(&dentry_attach_lock);
                kfree(dl);
                iput(inode);
        }

        dput(alias);

        return ret;
}
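/*
 * Illustrative only, not code from this file: a lookup-style caller is
 * expected to hold the parent directory's cluster lock while attaching,
 * roughly along these lines (the real sequence lives in ocfs2_lookup()
 * in namei.c and differs in detail):
 *
 *	ocfs2_inode_lock(dir, NULL, 0);           parent dir cluster lock
 *	dentry = d_splice_alias(inode, dentry);   hash the name
 *	ocfs2_dentry_attach_lock(dentry, inode,
 *				 OCFS2_I(dir)->ip_blkno);
 *	ocfs2_inode_unlock(dir, 0);
 */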
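/*
 * Protects osb->dentry_lock_list, the per-superblock list of dentry
 * locks whose inode references are dropped later from ocfs2_wq context.
 */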
DEFINE_SPINLOCK(dentry_list_lock);

/* We limit the number of dentry locks to drop in one go. We have
 * this limit so that we don't starve other users of ocfs2_wq. */
#define DL_INODE_DROP_COUNT 64

/* Drop inode references from dentry locks */
static void __ocfs2_drop_dl_inodes(struct ocfs2_super *osb, int drop_count)
{
        struct ocfs2_dentry_lock *dl;

        spin_lock(&dentry_list_lock);
        while (osb->dentry_lock_list && (drop_count < 0 || drop_count--)) {
                dl = osb->dentry_lock_list;
                osb->dentry_lock_list = dl->dl_next;
                spin_unlock(&dentry_list_lock);
                iput(dl->dl_inode);
                kfree(dl);
                spin_lock(&dentry_list_lock);
        }
        spin_unlock(&dentry_list_lock);
}

void ocfs2_drop_dl_inodes(struct work_struct *work)
{
        struct ocfs2_super *osb = container_of(work, struct ocfs2_super,
                                               dentry_lock_work);

        __ocfs2_drop_dl_inodes(osb, DL_INODE_DROP_COUNT);
        /*
         * Don't queue dropping if umount is in progress. We flush the
         * list in ocfs2_dismount_volume
         */
        spin_lock(&dentry_list_lock);
        if (osb->dentry_lock_list &&
            !ocfs2_test_osb_flag(osb, OCFS2_OSB_DROP_DENTRY_LOCK_IMMED))
                queue_work(ocfs2_wq, &osb->dentry_lock_work);
        spin_unlock(&dentry_list_lock);
}

/* Flush the whole list (used by the unmount path) */
void ocfs2_drop_all_dl_inodes(struct ocfs2_super *osb)
{
        __ocfs2_drop_dl_inodes(osb, -1);
}

/*
 * ocfs2_dentry_iput() and friends.
 *
 * At this point, our particular dentry is detached from the inode's
 * alias list, so there's no way that the locking code can find it.
 *
 * The interesting stuff happens when we determine that our lock needs
 * to go away because this is the last subdir alias in the
 * system. This function needs to handle a couple of things:
 *
 * 1) Synchronizing lock shutdown with the downconvert threads. This
 *    is already handled for us via the lockres release drop function
 *    called in ocfs2_release_dentry_lock()
 *
 * 2) A race may occur when we're doing our lock shutdown and
 *    another process wants to create a new dentry lock. Right now we
 *    let them race, which means that for a very short while, this
 *    node might have two locks on a lock resource. This shouldn't be
 *    a problem, though, because one of them is in the process of
 *    being thrown out.
 */
static void ocfs2_drop_dentry_lock(struct ocfs2_super *osb,
                                   struct ocfs2_dentry_lock *dl)
{
        ocfs2_simple_drop_lockres(osb, &dl->dl_lockres);
        ocfs2_lock_res_free(&dl->dl_lockres);

        /* We leave dropping of inode reference to ocfs2_wq as that can
         * possibly lead to inode deletion which gets tricky */
        spin_lock(&dentry_list_lock);
        if (!osb->dentry_lock_list &&
            !ocfs2_test_osb_flag(osb, OCFS2_OSB_DROP_DENTRY_LOCK_IMMED))
                queue_work(ocfs2_wq, &osb->dentry_lock_work);
        dl->dl_next = osb->dentry_lock_list;
        osb->dentry_lock_list = dl;
        spin_unlock(&dentry_list_lock);
}

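/*
 * Drop one reference to a dentry lock. When the last reference goes
 * away we drop the cluster lock and queue the inode reference for
 * ocfs2_wq via osb->dentry_lock_list (see ocfs2_drop_dentry_lock()
 * above), since the final iput() may trigger inode deletion.
 */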
void ocfs2_dentry_lock_put(struct ocfs2_super *osb,
                           struct ocfs2_dentry_lock *dl)
{
        int unlock;

        BUG_ON(dl->dl_count == 0);

        spin_lock(&dentry_attach_lock);
        dl->dl_count--;
        unlock = !dl->dl_count;
        spin_unlock(&dentry_attach_lock);

        if (unlock)
                ocfs2_drop_dentry_lock(osb, dl);
}

static void ocfs2_dentry_iput(struct dentry *dentry, struct inode *inode)
{
        struct ocfs2_dentry_lock *dl = dentry->d_fsdata;

        if (!dl) {
                /*
                 * No dentry lock is ok if we're disconnected or
                 * unhashed.
                 */
                if (!(dentry->d_flags & DCACHE_DISCONNECTED) &&
                    !d_unhashed(dentry)) {
                        unsigned long long ino = 0ULL;
                        if (inode)
                                ino = (unsigned long long)OCFS2_I(inode)->ip_blkno;
                        mlog(ML_ERROR, "Dentry is missing cluster lock. "
                             "inode: %llu, d_flags: 0x%x, d_name: %.*s\n",
                             ino, dentry->d_flags, dentry->d_name.len,
                             dentry->d_name.name);
                }

                goto out;
        }

        mlog_bug_on_msg(dl->dl_count == 0, "dentry: %.*s, count: %u\n",
                        dentry->d_name.len, dentry->d_name.name,
                        dl->dl_count);

        ocfs2_dentry_lock_put(OCFS2_SB(dentry->d_sb), dl);

out:
        iput(inode);
}

/*
 * d_move(), but keep the locks in sync.
 *
 * When we are done, "dentry" will have the parent dir and name of
 * "target", which will be thrown away.
 *
 * We manually update the lock of "dentry" if need be.
 *
 * "target" doesn't have its dentry lock touched - we allow the later
 * dput() to handle this for us.
 *
 * This is called during ocfs2_rename(), while holding parent
 * directory locks. The dentries have already been deleted on other
 * nodes via ocfs2_remote_dentry_delete().
 *
 * Normally, the VFS handles the d_move() for the file system, after
 * the ->rename() callback. OCFS2 wants to handle this internally, so
 * the new lock can be created atomically with respect to the cluster.
 */
void ocfs2_dentry_move(struct dentry *dentry, struct dentry *target,
                       struct inode *old_dir, struct inode *new_dir)
{
        int ret;
        struct ocfs2_super *osb = OCFS2_SB(old_dir->i_sb);
        struct inode *inode = dentry->d_inode;

        /*
         * Move within the same directory, so the actual lock info won't
         * change.
         *
         * XXX: Is there any advantage to dropping the lock here?
         */
        if (old_dir == new_dir)
                goto out_move;

        ocfs2_dentry_lock_put(osb, dentry->d_fsdata);

        dentry->d_fsdata = NULL;
        ret = ocfs2_dentry_attach_lock(dentry, inode, OCFS2_I(new_dir)->ip_blkno);
        if (ret)
                mlog_errno(ret);

out_move:
        d_move(dentry, target);
}

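/*
 * Dentry operations used by ocfs2: d_revalidate keeps cached (including
 * negative) dentries coherent across the cluster, and d_iput drops the
 * reference taken by ocfs2_dentry_attach_lock() when a dentry is torn
 * down.
 */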
const struct dentry_operations ocfs2_dentry_ops = {
        .d_revalidate	= ocfs2_dentry_revalidate,
        .d_iput		= ocfs2_dentry_iput,
};