/* Cache page management and data I/O routines
 *
 * Copyright (C) 2004-2008 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define FSCACHE_DEBUG_LEVEL PAGE
#include <linux/module.h>
#include <linux/fscache-cache.h>
#include <linux/buffer_head.h>
#include <linux/pagevec.h>
#include "internal.h"

/*
 * check to see if a page is being written to the cache
 */
bool __fscache_check_page_write(struct fscache_cookie *cookie, struct page *page)
{
        void *val;

        rcu_read_lock();
        val = radix_tree_lookup(&cookie->stores, page->index);
        rcu_read_unlock();

        return val != NULL;
}
EXPORT_SYMBOL(__fscache_check_page_write);

/*
 * wait for a page to finish being written to the cache
 */
void __fscache_wait_on_page_write(struct fscache_cookie *cookie, struct page *page)
{
        wait_queue_head_t *wq = bit_waitqueue(&cookie->flags, 0);

        wait_event(*wq, !__fscache_check_page_write(cookie, page));
}
EXPORT_SYMBOL(__fscache_wait_on_page_write);

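/*
 * Usage sketch (assumption, not part of this file): a netfs that stores
 * pages in the cache will normally wait for any pending store to finish
 * before allowing a page to be invalidated or released, using the
 * wrappers from linux/fscache.h:
 *
 *      if (fscache_check_page_write(cookie, page))
 *              fscache_wait_on_page_write(cookie, page);
 */
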
/*
 * note that a page has finished being written to the cache
 */
static void fscache_end_page_write(struct fscache_cookie *cookie, struct page *page)
{
        struct page *xpage;

        spin_lock(&cookie->lock);
        xpage = radix_tree_delete(&cookie->stores, page->index);
        spin_unlock(&cookie->lock);
        ASSERT(xpage != NULL);

        wake_up_bit(&cookie->flags, 0);
}

/*
 * actually apply the changed attributes to a cache object
 */
static void fscache_attr_changed_op(struct fscache_operation *op)
{
        struct fscache_object *object = op->object;

        _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);

        fscache_stat(&fscache_n_attr_changed_calls);

        if (fscache_object_is_active(object) &&
            object->cache->ops->attr_changed(object) < 0)
                fscache_abort_object(object);

        _leave("");
}

/*
 * notification that the attributes on an object have changed
 */
int __fscache_attr_changed(struct fscache_cookie *cookie)
{
        struct fscache_operation *op;
        struct fscache_object *object;

        _enter("%p", cookie);

        ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);

        fscache_stat(&fscache_n_attr_changed);

        op = kzalloc(sizeof(*op), GFP_KERNEL);
        if (!op) {
                fscache_stat(&fscache_n_attr_changed_nomem);
                _leave(" = -ENOMEM");
                return -ENOMEM;
        }

        fscache_operation_init(op, NULL);
        fscache_operation_init_slow(op, fscache_attr_changed_op);
        op->flags = FSCACHE_OP_SLOW | (1 << FSCACHE_OP_EXCLUSIVE);

        spin_lock(&cookie->lock);

        if (hlist_empty(&cookie->backing_objects))
                goto nobufs;
        object = hlist_entry(cookie->backing_objects.first,
                             struct fscache_object, cookie_link);

        if (fscache_submit_exclusive_op(object, op) < 0)
                goto nobufs;
        spin_unlock(&cookie->lock);
        fscache_stat(&fscache_n_attr_changed_ok);
        fscache_put_operation(op);
        _leave(" = 0");
        return 0;

nobufs:
        spin_unlock(&cookie->lock);
        kfree(op);
        fscache_stat(&fscache_n_attr_changed_nobufs);
        _leave(" = %d", -ENOBUFS);
        return -ENOBUFS;
}
EXPORT_SYMBOL(__fscache_attr_changed);

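/*
 * Usage sketch (assumption, not part of this file): a netfs calls the
 * fscache_attr_changed() wrapper once an attribute change (e.g. a
 * truncate) has been committed, so the backing object can be resized
 * to match:
 *
 *      fscache_attr_changed(cookie);
 */
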
/*
 * handle secondary execution given to a retrieval op on behalf of the
 * cache
 */
static void fscache_retrieval_work(struct work_struct *work)
{
        struct fscache_retrieval *op =
                container_of(work, struct fscache_retrieval, op.fast_work);
        unsigned long start;

        _enter("{OP%x}", op->op.debug_id);

        start = jiffies;
        op->op.processor(&op->op);
        fscache_hist(fscache_ops_histogram, start);
        fscache_put_operation(&op->op);
}

/*
 * release a retrieval op reference
 */
static void fscache_release_retrieval_op(struct fscache_operation *_op)
{
        struct fscache_retrieval *op =
                container_of(_op, struct fscache_retrieval, op);

        _enter("{OP%x}", op->op.debug_id);

        fscache_hist(fscache_retrieval_histogram, op->start_time);
        if (op->context)
                fscache_put_context(op->op.object->cookie, op->context);

        _leave("");
}

/*
 * allocate a retrieval op
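 * - the op is marked FSCACHE_OP_MYTHREAD with FSCACHE_OP_WAITING set:
 *   once the object lets it run (FSCACHE_OP_WAITING is cleared), it is
 *   processed in the caller's thread rather than in the thread pool
 *   (see the wait loops in the callers below)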
 */
static struct fscache_retrieval *fscache_alloc_retrieval(
        struct address_space *mapping,
        fscache_rw_complete_t end_io_func,
        void *context)
{
        struct fscache_retrieval *op;

        /* allocate a retrieval operation and attempt to submit it */
        op = kzalloc(sizeof(*op), GFP_NOIO);
        if (!op) {
                fscache_stat(&fscache_n_retrievals_nomem);
                return NULL;
        }

        fscache_operation_init(&op->op, fscache_release_retrieval_op);
        op->op.flags = FSCACHE_OP_MYTHREAD | (1 << FSCACHE_OP_WAITING);
        op->mapping = mapping;
        op->end_io_func = end_io_func;
        op->context = context;
        op->start_time = jiffies;
        INIT_WORK(&op->op.fast_work, fscache_retrieval_work);
        INIT_LIST_HEAD(&op->to_do);
        return op;
}

/*
 * wait for a deferred lookup to complete
 */
static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
{
        unsigned long jif;

        _enter("");

        if (!test_bit(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags)) {
                _leave(" = 0 [imm]");
                return 0;
        }

        fscache_stat(&fscache_n_retrievals_wait);

        jif = jiffies;
        if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
                        fscache_wait_bit_interruptible,
                        TASK_INTERRUPTIBLE) != 0) {
                fscache_stat(&fscache_n_retrievals_intr);
                _leave(" = -ERESTARTSYS");
                return -ERESTARTSYS;
        }

        ASSERT(!test_bit(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags));

        smp_rmb();
        fscache_hist(fscache_retrieval_delay_histogram, jif);
        _leave(" = 0 [dly]");
        return 0;
}

/*
 * read a page from the cache or allocate a block in which to store it
 * - we return:
 *   -ENOMEM - out of memory, nothing done
 *   -ERESTARTSYS - interrupted
 *   -ENOBUFS - no backing object available in which to cache the block
 *   -ENODATA - no data available in the backing object for this block
 *   0 - dispatched a read - it'll call end_io_func() when finished
 */
int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
                                 struct page *page,
                                 fscache_rw_complete_t end_io_func,
                                 void *context,
                                 gfp_t gfp)
{
        struct fscache_retrieval *op;
        struct fscache_object *object;
        int ret;

        _enter("%p,%p,,,", cookie, page);

        fscache_stat(&fscache_n_retrievals);

        if (hlist_empty(&cookie->backing_objects))
                goto nobufs;

        ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
        ASSERTCMP(page, !=, NULL);

        if (fscache_wait_for_deferred_lookup(cookie) < 0)
                return -ERESTARTSYS;

        op = fscache_alloc_retrieval(page->mapping, end_io_func, context);
        if (!op) {
                _leave(" = -ENOMEM");
                return -ENOMEM;
        }

        spin_lock(&cookie->lock);

        if (hlist_empty(&cookie->backing_objects))
                goto nobufs_unlock;
        object = hlist_entry(cookie->backing_objects.first,
                             struct fscache_object, cookie_link);

        ASSERTCMP(object->state, >, FSCACHE_OBJECT_LOOKING_UP);

        if (fscache_submit_op(object, &op->op) < 0)
                goto nobufs_unlock;
        spin_unlock(&cookie->lock);

        fscache_stat(&fscache_n_retrieval_ops);

        /* pin the netfs read context in case we need to do the actual netfs
         * read because we've encountered a cache read failure */
        fscache_get_context(object->cookie, op->context);

        /* we wait for the operation to become active, and then process it
         * *here*, in this thread, and not in the thread pool */
        if (test_bit(FSCACHE_OP_WAITING, &op->op.flags)) {
                _debug(">>> WT");
                fscache_stat(&fscache_n_retrieval_op_waits);
                wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
                            fscache_wait_bit, TASK_UNINTERRUPTIBLE);
                _debug("<<< GO");
        }

        /* ask the cache to honour the operation */
        if (test_bit(FSCACHE_COOKIE_NO_DATA_YET, &object->cookie->flags)) {
                ret = object->cache->ops->allocate_page(op, page, gfp);
                if (ret == 0)
                        ret = -ENODATA;
        } else {
                ret = object->cache->ops->read_or_alloc_page(op, page, gfp);
        }

        if (ret == -ENOMEM)
                fscache_stat(&fscache_n_retrievals_nomem);
        else if (ret == -ERESTARTSYS)
                fscache_stat(&fscache_n_retrievals_intr);
        else if (ret == -ENODATA)
                fscache_stat(&fscache_n_retrievals_nodata);
        else if (ret < 0)
                fscache_stat(&fscache_n_retrievals_nobufs);
        else
                fscache_stat(&fscache_n_retrievals_ok);

        fscache_put_retrieval(op);
        _leave(" = %d", ret);
        return ret;

nobufs_unlock:
        spin_unlock(&cookie->lock);
        kfree(op);
nobufs:
        fscache_stat(&fscache_n_retrievals_nobufs);
        _leave(" = -ENOBUFS");
        return -ENOBUFS;
}
EXPORT_SYMBOL(__fscache_read_or_alloc_page);

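/*
 * Usage sketch (assumption, not part of this file): a netfs readpage()
 * implementation typically tries the cache first and falls back to a
 * server read, using the fscache_read_or_alloc_page() wrapper from
 * linux/fscache.h; my_end_io_func, ctx and my_read_from_server are
 * hypothetical netfs helpers here:
 *
 *      ret = fscache_read_or_alloc_page(cookie, page, my_end_io_func,
 *                                       ctx, GFP_KERNEL);
 *      if (ret == 0)
 *              return 0;  [read dispatched; my_end_io_func finishes it]
 *      return my_read_from_server(page, ctx);
 *                         [-ENODATA/-ENOBUFS etc.: fetch from the server]
 */
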
/*
 * read a list of pages from the cache or allocate blocks in which to
 * store them
 * - we return:
 *   -ENOMEM - out of memory, some pages may be being read
 *   -ERESTARTSYS - interrupted, some pages may be being read
 *   -ENOBUFS - no backing object or space available in which to cache any
 *              pages not being read
 *   -ENODATA - no data available in the backing object for some or all of
 *              the pages
 *   0 - dispatched a read on all pages
 *
 * end_io_func() will be called for each page read from the cache as it
 * finishes being read
 *
 * any pages for which a read is dispatched will be removed from pages and
 * nr_pages
 */
int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
                                  struct address_space *mapping,
                                  struct list_head *pages,
                                  unsigned *nr_pages,
                                  fscache_rw_complete_t end_io_func,
                                  void *context,
                                  gfp_t gfp)
{
        fscache_pages_retrieval_func_t func;
        struct fscache_retrieval *op;
        struct fscache_object *object;
        int ret;

        _enter("%p,,%d,,,", cookie, *nr_pages);

        fscache_stat(&fscache_n_retrievals);

        if (hlist_empty(&cookie->backing_objects))
                goto nobufs;

        ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
        ASSERTCMP(*nr_pages, >, 0);
        ASSERT(!list_empty(pages));

        if (fscache_wait_for_deferred_lookup(cookie) < 0)
                return -ERESTARTSYS;

        op = fscache_alloc_retrieval(mapping, end_io_func, context);
        if (!op)
                return -ENOMEM;

        spin_lock(&cookie->lock);

        if (hlist_empty(&cookie->backing_objects))
                goto nobufs_unlock;
        object = hlist_entry(cookie->backing_objects.first,
                             struct fscache_object, cookie_link);

        if (fscache_submit_op(object, &op->op) < 0)
                goto nobufs_unlock;
        spin_unlock(&cookie->lock);

        fscache_stat(&fscache_n_retrieval_ops);

        /* pin the netfs read context in case we need to do the actual netfs
         * read because we've encountered a cache read failure */
        fscache_get_context(object->cookie, op->context);

        /* we wait for the operation to become active, and then process it
         * *here*, in this thread, and not in the thread pool */
        if (test_bit(FSCACHE_OP_WAITING, &op->op.flags)) {
                _debug(">>> WT");
                fscache_stat(&fscache_n_retrieval_op_waits);
                wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
                            fscache_wait_bit, TASK_UNINTERRUPTIBLE);
                _debug("<<< GO");
        }

        /* ask the cache to honour the operation */
        if (test_bit(FSCACHE_COOKIE_NO_DATA_YET, &object->cookie->flags))
                func = object->cache->ops->allocate_pages;
        else
                func = object->cache->ops->read_or_alloc_pages;
        ret = func(op, pages, nr_pages, gfp);

        if (ret == -ENOMEM)
                fscache_stat(&fscache_n_retrievals_nomem);
        else if (ret == -ERESTARTSYS)
                fscache_stat(&fscache_n_retrievals_intr);
        else if (ret == -ENODATA)
                fscache_stat(&fscache_n_retrievals_nodata);
        else if (ret < 0)
                fscache_stat(&fscache_n_retrievals_nobufs);
        else
                fscache_stat(&fscache_n_retrievals_ok);

        fscache_put_retrieval(op);
        _leave(" = %d", ret);
        return ret;

nobufs_unlock:
        spin_unlock(&cookie->lock);
        kfree(op);
nobufs:
        fscache_stat(&fscache_n_retrievals_nobufs);
        _leave(" = -ENOBUFS");
        return -ENOBUFS;
}
EXPORT_SYMBOL(__fscache_read_or_alloc_pages);

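/*
 * Usage sketch (assumption, not part of this file): in a netfs
 * readpages() implementation, any pages this function dispatches reads
 * for are removed from *pages, so whatever remains on the list is then
 * read from the server; my_read_pages_from_server is a hypothetical
 * netfs helper:
 *
 *      ret = fscache_read_or_alloc_pages(cookie, mapping, pages, &nr,
 *                                        my_end_io_func, ctx, GFP_KERNEL);
 *      if (ret == 0)
 *              return 0;  [all pages dispatched to the cache]
 *      return my_read_pages_from_server(mapping, pages, nr, ctx);
 */
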
/*
 * allocate a block in the cache on which to store a page
 * - we return:
 *   -ENOMEM - out of memory, nothing done
 *   -ERESTARTSYS - interrupted
 *   -ENOBUFS - no backing object available in which to cache the block
 *   0 - block allocated
 */
int __fscache_alloc_page(struct fscache_cookie *cookie,
                         struct page *page,
                         gfp_t gfp)
{
        struct fscache_retrieval *op;
        struct fscache_object *object;
        int ret;

        _enter("%p,%p,,,", cookie, page);

        fscache_stat(&fscache_n_allocs);

        if (hlist_empty(&cookie->backing_objects))
                goto nobufs;

        ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
        ASSERTCMP(page, !=, NULL);

        if (fscache_wait_for_deferred_lookup(cookie) < 0)
                return -ERESTARTSYS;

        op = fscache_alloc_retrieval(page->mapping, NULL, NULL);
        if (!op)
                return -ENOMEM;

        spin_lock(&cookie->lock);

        if (hlist_empty(&cookie->backing_objects))
                goto nobufs_unlock;
        object = hlist_entry(cookie->backing_objects.first,
                             struct fscache_object, cookie_link);

        if (fscache_submit_op(object, &op->op) < 0)
                goto nobufs_unlock;
        spin_unlock(&cookie->lock);

        fscache_stat(&fscache_n_alloc_ops);

        if (test_bit(FSCACHE_OP_WAITING, &op->op.flags)) {
                _debug(">>> WT");
                fscache_stat(&fscache_n_alloc_op_waits);
                wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
                            fscache_wait_bit, TASK_UNINTERRUPTIBLE);
                _debug("<<< GO");
        }

        /* ask the cache to honour the operation */
        ret = object->cache->ops->allocate_page(op, page, gfp);

        if (ret < 0)
                fscache_stat(&fscache_n_allocs_nobufs);
        else
                fscache_stat(&fscache_n_allocs_ok);

        fscache_put_retrieval(op);
        _leave(" = %d", ret);
        return ret;

nobufs_unlock:
        spin_unlock(&cookie->lock);
        kfree(op);
nobufs:
        fscache_stat(&fscache_n_allocs_nobufs);
        _leave(" = -ENOBUFS");
        return -ENOBUFS;
}
EXPORT_SYMBOL(__fscache_alloc_page);

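/*
 * Usage sketch (assumption, not part of this file): a netfs may reserve
 * space for a page it intends to store without reading it back first,
 * and then hand the page to the cache:
 *
 *      if (fscache_alloc_page(cookie, page, GFP_KERNEL) == 0)
 *              fscache_write_page(cookie, page, GFP_KERNEL);
 */
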
/*
 * release a write op reference
 */
static void fscache_release_write_op(struct fscache_operation *_op)
{
        _enter("{OP%x}", _op->debug_id);
}

/*
 * perform the background storage of a page into the cache
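 * - pages queued by __fscache_write_page() sit in cookie->stores, tagged
 *   with FSCACHE_COOKIE_PENDING_TAG; each invocation writes out at most
 *   one tagged page and then requeues the op, until no pending pages
 *   remain, at which point FSCACHE_OBJECT_PENDING_WRITE is cleared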
 */
static void fscache_write_op(struct fscache_operation *_op)
{
        struct fscache_storage *op =
                container_of(_op, struct fscache_storage, op);
        struct fscache_object *object = op->op.object;
        struct fscache_cookie *cookie = object->cookie;
        struct page *page;
        unsigned n;
        void *results[1];
        int ret;

        _enter("{OP%x,%d}", op->op.debug_id, atomic_read(&op->op.usage));

        spin_lock(&cookie->lock);
        spin_lock(&object->lock);

        if (!fscache_object_is_active(object)) {
                spin_unlock(&object->lock);
                spin_unlock(&cookie->lock);
                _leave("");
                return;
        }

        fscache_stat(&fscache_n_store_calls);

        /* find a page to store */
        page = NULL;
        n = radix_tree_gang_lookup_tag(&cookie->stores, results, 0, 1,
                                       FSCACHE_COOKIE_PENDING_TAG);
        if (n != 1)
                goto superseded;
        page = results[0];
        _debug("gang %d [%lx]", n, page->index);
        if (page->index > op->store_limit)
                goto superseded;

        radix_tree_tag_clear(&cookie->stores, page->index,
                             FSCACHE_COOKIE_PENDING_TAG);

        spin_unlock(&object->lock);
        spin_unlock(&cookie->lock);

        if (page) {
                ret = object->cache->ops->write_page(op, page);
                fscache_end_page_write(cookie, page);
                page_cache_release(page);
                if (ret < 0)
                        fscache_abort_object(object);
                else
                        fscache_enqueue_operation(&op->op);
        }

        _leave("");
        return;

superseded:
        /* this writer is going away and there aren't any more things to
         * write */
        _debug("cease");
        clear_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags);
        spin_unlock(&object->lock);
        spin_unlock(&cookie->lock);
        _leave("");
}

/*
 * request a page be stored in the cache
 * - returns:
 *   -ENOMEM - out of memory, nothing done
 *   -ENOBUFS - no backing object available in which to cache the page
 *   0 - dispatched a background write of the page
 *
 * if the cookie still has a backing object at this point, that object can be
 * in one of a few states with respect to storage processing:
 *
 *  (1) negative lookup, object not yet created (FSCACHE_COOKIE_CREATING is
 *      set)
 *
 *      (a) no writes yet (set FSCACHE_COOKIE_PENDING_FILL and queue deferred
 *          fill op)
 *
 *      (b) writes deferred till post-creation (mark page for writing and
 *          return immediately)
 *
 *  (2) negative lookup, object created, initial fill being made from netfs
 *      (FSCACHE_COOKIE_INITIAL_FILL is set)
 *
 *      (a) fill point not yet reached this page (mark page for writing and
 *          return)
 *
 *      (b) fill point passed this page (queue op to store this page)
 *
 *  (3) object extant (queue op to store this page)
 *
 * any other state is invalid
 */
int __fscache_write_page(struct fscache_cookie *cookie,
                         struct page *page,
                         gfp_t gfp)
{
        struct fscache_storage *op;
        struct fscache_object *object;
        int ret;

        _enter("%p,%x,", cookie, (u32) page->flags);

        ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
        ASSERT(PageFsCache(page));

        fscache_stat(&fscache_n_stores);

        op = kzalloc(sizeof(*op), GFP_NOIO);
        if (!op)
                goto nomem;

        fscache_operation_init(&op->op, fscache_release_write_op);
        fscache_operation_init_slow(&op->op, fscache_write_op);
        op->op.flags = FSCACHE_OP_SLOW | (1 << FSCACHE_OP_WAITING);

        ret = radix_tree_preload(gfp & ~__GFP_HIGHMEM);
        if (ret < 0)
                goto nomem_free;

        ret = -ENOBUFS;
        spin_lock(&cookie->lock);

        if (hlist_empty(&cookie->backing_objects))
                goto nobufs;
        object = hlist_entry(cookie->backing_objects.first,
                             struct fscache_object, cookie_link);
        if (test_bit(FSCACHE_IOERROR, &object->cache->flags))
                goto nobufs;

        /* add the page to the pending-storage radix tree on the backing
         * object */
        spin_lock(&object->lock);

        _debug("store limit %llx", (unsigned long long) object->store_limit);

        ret = radix_tree_insert(&cookie->stores, page->index, page);
        if (ret < 0) {
                if (ret == -EEXIST)
                        goto already_queued;
                _debug("insert failed %d", ret);
                goto nobufs_unlock_obj;
        }

        radix_tree_tag_set(&cookie->stores, page->index,
                           FSCACHE_COOKIE_PENDING_TAG);
        page_cache_get(page);

        /* we only want one writer at a time, but we do need to queue new
         * writers after exclusive ops */
        if (test_and_set_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags))
                goto already_pending;

        spin_unlock(&object->lock);

        op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
        op->store_limit = object->store_limit;

        if (fscache_submit_op(object, &op->op) < 0)
                goto submit_failed;

        spin_unlock(&cookie->lock);
        radix_tree_preload_end();
        fscache_stat(&fscache_n_store_ops);
        fscache_stat(&fscache_n_stores_ok);

        /* the slow work queue now carries its own ref on the object */
        fscache_put_operation(&op->op);
        _leave(" = 0");
        return 0;

already_queued:
        fscache_stat(&fscache_n_stores_again);
already_pending:
        spin_unlock(&object->lock);
        spin_unlock(&cookie->lock);
        radix_tree_preload_end();
        kfree(op);
        fscache_stat(&fscache_n_stores_ok);
        _leave(" = 0");
        return 0;

submit_failed:
        radix_tree_delete(&cookie->stores, page->index);
        page_cache_release(page);
        ret = -ENOBUFS;
        goto nobufs;

nobufs_unlock_obj:
        spin_unlock(&object->lock);
nobufs:
        spin_unlock(&cookie->lock);
        radix_tree_preload_end();
        kfree(op);
        fscache_stat(&fscache_n_stores_nobufs);
        _leave(" = -ENOBUFS");
        return -ENOBUFS;

nomem_free:
        kfree(op);
nomem:
        fscache_stat(&fscache_n_stores_oom);
        _leave(" = -ENOMEM");
        return -ENOMEM;
}
EXPORT_SYMBOL(__fscache_write_page);

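/*
 * Usage sketch (assumption, not part of this file): after fetching a
 * page from the server, a netfs hands it to the cache; if the store
 * cannot be queued, the page must be unmarked again:
 *
 *      if (PageFsCache(page) &&
 *          fscache_write_page(cookie, page, GFP_KERNEL) != 0)
 *              fscache_uncache_page(cookie, page);
 */
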
/*
 * remove a page from the cache
 */
void __fscache_uncache_page(struct fscache_cookie *cookie, struct page *page)
{
        struct fscache_object *object;

        _enter(",%p", page);

        ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
        ASSERTCMP(page, !=, NULL);

        fscache_stat(&fscache_n_uncaches);

        /* cache withdrawal may beat us to it */
        if (!PageFsCache(page))
                goto done;

        /* get the object */
        spin_lock(&cookie->lock);

        if (hlist_empty(&cookie->backing_objects)) {
                ClearPageFsCache(page);
                goto done_unlock;
        }

        object = hlist_entry(cookie->backing_objects.first,
                             struct fscache_object, cookie_link);

        /* there might now be stuff on disk we could read */
        clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);

        /* only invoke the cache backend if we managed to mark the page
         * uncached here; this deals with synchronisation vs withdrawal */
        if (TestClearPageFsCache(page) &&
            object->cache->ops->uncache_page) {
                /* the cache backend releases the cookie lock */
                object->cache->ops->uncache_page(object, page);
                goto done;
        }

done_unlock:
        spin_unlock(&cookie->lock);
done:
        _leave("");
}
EXPORT_SYMBOL(__fscache_uncache_page);

/**
 * fscache_mark_pages_cached - Mark pages as being cached
 * @op: The retrieval op pages are being marked for
 * @pagevec: The pages to be marked
 *
 * Mark a bunch of netfs pages as being cached.  After this is called,
 * the netfs must call fscache_uncache_page() to remove the mark.
 */
void fscache_mark_pages_cached(struct fscache_retrieval *op,
                               struct pagevec *pagevec)
{
        struct fscache_cookie *cookie = op->op.object->cookie;
        unsigned long loop;

#ifdef CONFIG_FSCACHE_STATS
        atomic_add(pagevec->nr, &fscache_n_marks);
#endif

        for (loop = 0; loop < pagevec->nr; loop++) {
                struct page *page = pagevec->pages[loop];

                _debug("- mark %p{%lx}", page, page->index);
                if (TestSetPageFsCache(page)) {
                        static bool once_only;
                        if (!once_only) {
                                once_only = true;
                                printk(KERN_WARNING "FS-Cache:"
                                       " Cookie type %s marked page %lx"
                                       " multiple times\n",
                                       cookie->def->name, page->index);
                        }
                }
        }

        if (cookie->def->mark_pages_cached)
                cookie->def->mark_pages_cached(cookie->netfs_data,
                                               op->mapping, pagevec);
        pagevec_reinit(pagevec);
}
EXPORT_SYMBOL(fscache_mark_pages_cached);