/*
 * Copyright (c) 2002, 2007 Red Hat, Inc. All rights reserved.
 *
 * This software may be freely redistributed under the terms of the
 * GNU General Public License.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Authors: David Woodhouse <dwmw2@infradead.org>
 *          David Howells <dhowells@redhat.com>
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/circ_buf.h>
#include <linux/sched.h>
#include "internal.h"

#if 0
unsigned afs_vnode_update_timeout = 10;
#endif /* 0 */

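/* Number of free slots in a server's callback-break ring.  CIRC_SPACE()
 * is the standard helper from <linux/circ_buf.h>; it requires the ring
 * size (ARRAY_SIZE of cb_break) to be a power of two, since the head
 * index is advanced below with a mask of (size - 1). */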
#define afs_breakring_space(server) \
	CIRC_SPACE((server)->cb_break_head, (server)->cb_break_tail, \
		   ARRAY_SIZE((server)->cb_break))

//static void afs_callback_updater(struct work_struct *);

static struct workqueue_struct *afs_callback_update_worker;

/*
 * allow the fileserver to request callback state (re-)initialisation
 */
void afs_init_callback_state(struct afs_server *server)
{
	struct afs_vnode *vnode;

	_enter("{%p}", server);

	spin_lock(&server->cb_lock);

	/* kill all the promises on record from this server */
	while (!RB_EMPTY_ROOT(&server->cb_promises)) {
		vnode = rb_entry(server->cb_promises.rb_node,
				 struct afs_vnode, cb_promise);
		_debug("UNPROMISE { vid=%x:%u uq=%u}",
		       vnode->fid.vid, vnode->fid.vnode, vnode->fid.unique);
		rb_erase(&vnode->cb_promise, &server->cb_promises);
		vnode->cb_promised = false;
	}

	spin_unlock(&server->cb_lock);
	_leave("");
}

/*
 * handle the data invalidation side of a callback being broken
 */
void afs_broken_callback_work(struct work_struct *work)
{
	struct afs_vnode *vnode =
		container_of(work, struct afs_vnode, cb_broken_work);

	_enter("");

	if (test_bit(AFS_VNODE_DELETED, &vnode->flags))
		return;

	/* we're only interested in dealing with a broken callback on *this*
	 * vnode and only if no-one else has dealt with it yet */
	if (!mutex_trylock(&vnode->validate_lock))
		return; /* someone else is dealing with it */

	if (test_bit(AFS_VNODE_CB_BROKEN, &vnode->flags)) {
		if (S_ISDIR(vnode->vfs_inode.i_mode))
			afs_clear_permits(vnode);

		if (afs_vnode_fetch_status(vnode, NULL, NULL) < 0)
			goto out;

		if (test_bit(AFS_VNODE_DELETED, &vnode->flags))
			goto out;

		/* if the vnode's data version number changed then its contents
		 * are different */
		if (test_and_clear_bit(AFS_VNODE_ZAP_DATA, &vnode->flags))
			afs_zap_data(vnode);
	}

out:
	mutex_unlock(&vnode->validate_lock);

	/* avoid the potential race whereby the mutex_trylock() in this
	 * function happens again between the clear_bit() and the
	 * mutex_unlock() */
	if (test_bit(AFS_VNODE_CB_BROKEN, &vnode->flags)) {
		_debug("requeue");
		queue_work(afs_callback_update_worker, &vnode->cb_broken_work);
	}
	_leave("");
}

/*
 * actually break a callback
 */
static void afs_break_callback(struct afs_server *server,
			       struct afs_vnode *vnode)
{
	_enter("");

	set_bit(AFS_VNODE_CB_BROKEN, &vnode->flags);

	if (vnode->cb_promised) {
		spin_lock(&vnode->lock);

		_debug("break callback");

		spin_lock(&server->cb_lock);
		if (vnode->cb_promised) {
			rb_erase(&vnode->cb_promise, &server->cb_promises);
			vnode->cb_promised = false;
		}
		spin_unlock(&server->cb_lock);

		queue_work(afs_callback_update_worker, &vnode->cb_broken_work);
		if (list_empty(&vnode->granted_locks) &&
		    !list_empty(&vnode->pending_locks))
			afs_lock_may_be_available(vnode);
		spin_unlock(&vnode->lock);
	}
}

/*
 * allow the fileserver to explicitly break one callback
 * - happens when
 *   - the backing file is changed
 *   - a lock is released
 */
static void afs_break_one_callback(struct afs_server *server,
				   struct afs_fid *fid)
{
	struct afs_vnode *vnode;
	struct rb_node *p;

	_debug("find");
	spin_lock(&server->fs_lock);
	p = server->fs_vnodes.rb_node;
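	/* binary search of the server's vnode tree, keyed on the fid
	 * triple { volume ID, vnode number, uniquifier } */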
	while (p) {
		vnode = rb_entry(p, struct afs_vnode, server_rb);
		if (fid->vid < vnode->fid.vid)
			p = p->rb_left;
		else if (fid->vid > vnode->fid.vid)
			p = p->rb_right;
		else if (fid->vnode < vnode->fid.vnode)
			p = p->rb_left;
		else if (fid->vnode > vnode->fid.vnode)
			p = p->rb_right;
		else if (fid->unique < vnode->fid.unique)
			p = p->rb_left;
		else if (fid->unique > vnode->fid.unique)
			p = p->rb_right;
		else
			goto found;
	}

	/* not found so we just ignore it (it may have moved to another
	 * server) */
not_available:
	_debug("not avail");
	spin_unlock(&server->fs_lock);
	_leave("");
	return;

found:
	_debug("found");
	ASSERTCMP(server, ==, vnode->server);

	if (!igrab(AFS_VNODE_TO_I(vnode)))
		goto not_available;
	spin_unlock(&server->fs_lock);

	afs_break_callback(server, vnode);
	iput(&vnode->vfs_inode);
	_leave("");
}

/*
 * allow the fileserver to break callback promises
 */
void afs_break_callbacks(struct afs_server *server, size_t count,
			 struct afs_callback callbacks[])
{
	_enter("%p,%zu,", server, count);

	ASSERT(server != NULL);
	ASSERTCMP(count, <=, AFSCBMAX);

	for (; count > 0; callbacks++, count--) {
		_debug("- Fid { vl=%08x n=%u u=%u } CB { v=%u x=%u t=%u }",
		       callbacks->fid.vid,
		       callbacks->fid.vnode,
		       callbacks->fid.unique,
		       callbacks->version,
		       callbacks->expiry,
		       callbacks->type
		       );
		afs_break_one_callback(server, &callbacks->fid);
	}

	_leave("");
	return;
}

/*
 * record the callback for breaking
 * - the caller must hold server->cb_lock
 */
static void afs_do_give_up_callback(struct afs_server *server,
				    struct afs_vnode *vnode)
{
	struct afs_callback *cb;

	_enter("%p,%p", server, vnode);

	cb = &server->cb_break[server->cb_break_head];
	cb->fid = vnode->fid;
	cb->version = vnode->cb_version;
	cb->expiry = vnode->cb_expiry;
	cb->type = vnode->cb_type;
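	/* publish the new ring entry before advancing the head pointer, so
	 * that a consumer observing the new head also sees a fully written
	 * slot */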
	smp_wmb();
	server->cb_break_head =
		(server->cb_break_head + 1) &
		(ARRAY_SIZE(server->cb_break) - 1);

	/* defer the breaking of callbacks to try and collect as many as
	 * possible to ship in one operation */
	switch (atomic_inc_return(&server->cb_break_n)) {
	case 1 ... AFSCBMAX - 1:
		queue_delayed_work(afs_callback_update_worker,
				   &server->cb_break_work, HZ * 2);
		break;
	case AFSCBMAX:
		afs_flush_callback_breaks(server);
		break;
	default:
		break;
	}

	ASSERT(server->cb_promises.rb_node != NULL);
	rb_erase(&vnode->cb_promise, &server->cb_promises);
	vnode->cb_promised = false;
	_leave("");
}

/*
 * discard the callback on a deleted item
 */
void afs_discard_callback_on_delete(struct afs_vnode *vnode)
{
	struct afs_server *server = vnode->server;

	_enter("%d", vnode->cb_promised);

	if (!vnode->cb_promised) {
		_leave(" [not promised]");
		return;
	}

	ASSERT(server != NULL);

	spin_lock(&server->cb_lock);
	if (vnode->cb_promised) {
		ASSERT(server->cb_promises.rb_node != NULL);
		rb_erase(&vnode->cb_promise, &server->cb_promises);
		vnode->cb_promised = false;
	}
	spin_unlock(&server->cb_lock);
	_leave("");
}

/*
 * give up the callback registered for a vnode on the file server when the
 * inode is being cleared
 */
void afs_give_up_callback(struct afs_vnode *vnode)
{
	struct afs_server *server = vnode->server;

	DECLARE_WAITQUEUE(myself, current);

	_enter("%d", vnode->cb_promised);

	_debug("GIVE UP INODE %p", &vnode->vfs_inode);

	if (!vnode->cb_promised) {
		_leave(" [not promised]");
		return;
	}

	ASSERT(server != NULL);

	spin_lock(&server->cb_lock);
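	/* if the break ring is full, sleep until the dispatcher has drained
	 * it; cb_lock is dropped around schedule() and both conditions are
	 * rechecked once it is retaken */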
	if (vnode->cb_promised && afs_breakring_space(server) == 0) {
		add_wait_queue(&server->cb_break_waitq, &myself);
		for (;;) {
			set_current_state(TASK_UNINTERRUPTIBLE);
			if (!vnode->cb_promised ||
			    afs_breakring_space(server) != 0)
				break;
			spin_unlock(&server->cb_lock);
			schedule();
			spin_lock(&server->cb_lock);
		}
		remove_wait_queue(&server->cb_break_waitq, &myself);
		__set_current_state(TASK_RUNNING);
	}

	/* of course, it's always possible for the server to break this vnode's
	 * callback first... */
	if (vnode->cb_promised)
		afs_do_give_up_callback(server, vnode);

	spin_unlock(&server->cb_lock);
	_leave("");
}

/*
 * dispatch a deferred give up callbacks operation
 */
void afs_dispatch_give_up_callbacks(struct work_struct *work)
{
	struct afs_server *server =
		container_of(work, struct afs_server, cb_break_work.work);

	_enter("");

	/* tell the fileserver to discard the callback promises it has
	 * - in the event of ENOMEM or some other error, we just forget that we
	 *   had callbacks entirely, and the server will call us later to break
	 *   them
	 */
	afs_fs_give_up_callbacks(server, &afs_async_call);
}

/*
 * flush the outstanding callback breaks on a server
 */
void afs_flush_callback_breaks(struct afs_server *server)
{
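	/* cancel any deferred dispatch and requeue it with no delay so that
	 * the accumulated breaks are shipped immediately */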
	cancel_delayed_work(&server->cb_break_work);
	queue_delayed_work(afs_callback_update_worker,
			   &server->cb_break_work, 0);
}

#if 0
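/* NOTE: this disabled updater is an unfinished sketch; it refers to
 * identifiers (vl, vldb, afs_vnode_update_worker, afs_vnode_update)
 * that are not defined in this file, so it would not compile if the
 * guard above were removed. */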
/*
 * update a bunch of callbacks
 */
static void afs_callback_updater(struct work_struct *work)
{
	struct afs_server *server;
	struct afs_vnode *vnode, *xvnode;
	time_t now;
	long timeout;
	int ret;

	server = container_of(work, struct afs_server, updater);

	_enter("");

	now = get_seconds();

	/* find the first vnode to update */
	spin_lock(&server->cb_lock);
	for (;;) {
		if (RB_EMPTY_ROOT(&server->cb_promises)) {
			spin_unlock(&server->cb_lock);
			_leave(" [nothing]");
			return;
		}

		vnode = rb_entry(rb_first(&server->cb_promises),
				 struct afs_vnode, cb_promise);
		if (atomic_read(&vnode->usage) > 0)
			break;
		rb_erase(&vnode->cb_promise, &server->cb_promises);
		vnode->cb_promised = false;
	}

	timeout = vnode->update_at - now;
	if (timeout > 0) {
		queue_delayed_work(afs_vnode_update_worker,
				   &afs_vnode_update, timeout * HZ);
		spin_unlock(&server->cb_lock);
		_leave(" [nothing]");
		return;
	}

	list_del_init(&vnode->update);
	atomic_inc(&vnode->usage);
	spin_unlock(&server->cb_lock);

	/* we can now perform the update */
	_debug("update %s", vnode->vldb.name);
	vnode->state = AFS_VL_UPDATING;
	vnode->upd_rej_cnt = 0;
	vnode->upd_busy_cnt = 0;

	ret = afs_vnode_update_record(vl, &vldb);
	switch (ret) {
	case 0:
		afs_vnode_apply_update(vl, &vldb);
		vnode->state = AFS_VL_UPDATING;
		break;
	case -ENOMEDIUM:
		vnode->state = AFS_VL_VOLUME_DELETED;
		break;
	default:
		vnode->state = AFS_VL_UNCERTAIN;
		break;
	}

	/* and then reschedule */
	_debug("reschedule");
	vnode->update_at = get_seconds() + afs_vnode_update_timeout;

	spin_lock(&server->cb_lock);

	if (!list_empty(&server->cb_promises)) {
		/* next update in 10 minutes, but wait at least 1 second more
		 * than the newest record already queued so that we don't spam
		 * the VL server suddenly with lots of requests
		 */
		xvnode = list_entry(server->cb_promises.prev,
				    struct afs_vnode, update);
		if (vnode->update_at <= xvnode->update_at)
			vnode->update_at = xvnode->update_at + 1;
		xvnode = list_entry(server->cb_promises.next,
				    struct afs_vnode, update);
		timeout = xvnode->update_at - now;
		if (timeout < 0)
			timeout = 0;
	} else {
		timeout = afs_vnode_update_timeout;
	}

	list_add_tail(&vnode->update, &server->cb_promises);

	_debug("timeout %ld", timeout);
	queue_delayed_work(afs_vnode_update_worker,
			   &afs_vnode_update, timeout * HZ);
	spin_unlock(&server->cb_lock);
	afs_put_vnode(vl);
}
#endif

/*
 * initialise the callback update process
 */
int __init afs_callback_update_init(void)
{
	afs_callback_update_worker =
		create_singlethread_workqueue("kafs_callbackd");
	return afs_callback_update_worker ? 0 : -ENOMEM;
}

/*
 * shut down the callback update process
 */
void afs_callback_update_kill(void)
{
	destroy_workqueue(afs_callback_update_worker);
}