/*
 * linux/fs/mbcache.c
 * (C) 2001-2002 Andreas Gruenbacher, <a.gruenbacher@computer.org>
 */

/*
 * Filesystem Meta Information Block Cache (mbcache)
 *
 * The mbcache caches blocks of block devices that need to be located
 * by their device/block number, as well as by other criteria (such
 * as the block's contents).
 *
 * There can only be one cache entry in a cache per device and block number.
 * Additional indexes need not be unique in this sense. The number of
 * additional indexes (=other criteria) can be hardwired at compile time
 * or specified at cache create time.
 *
 * Each cache entry is of fixed size. An entry may be `valid' or `invalid'
 * in the cache. A valid entry is in the main hash tables of the cache,
 * and may also be in the lru list. An invalid entry is not in any hashes
 * or lists.
 *
 * A valid cache entry is only in the lru list if no handles refer to it.
 * Invalid cache entries will be freed when the last handle to the cache
 * entry is released. Entries that cannot be freed immediately are put
 * back on the lru list.
 */
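
/*
 * A minimal usage sketch (illustrative only, compiled out below): a
 * filesystem typically creates one cache at module init, then allocates
 * and inserts an entry for each metadata block it wants to share. The
 * names "example_cache", "example_init" and "example_insert" are
 * hypothetical; the pattern follows the extended attribute code that
 * uses this cache.
 */
#if 0
static struct mb_cache *example_cache;

static int example_init(void)
{
	/* 6 bucket bits = 64 hash buckets; the cache caps itself at
	 * 64 << 4 = 1024 entries (see mb_cache_create() below). */
	example_cache = mb_cache_create("example_cache", 6);
	return example_cache ? 0 : -ENOMEM;
}

static void example_insert(struct block_device *bdev, sector_t block,
			   unsigned int key)
{
	struct mb_cache_entry *ce;

	ce = mb_cache_entry_alloc(example_cache, GFP_NOFS);
	if (!ce)
		return;
	if (mb_cache_entry_insert(ce, bdev, block, key))
		/* -EBUSY: someone else inserted this block meanwhile. */
		mb_cache_entry_free(ce);
	else
		mb_cache_entry_release(ce);
}
#endif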

#include <linux/kernel.h>
#include <linux/module.h>

#include <linux/hash.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/mbcache.h>


#ifdef MB_CACHE_DEBUG
# define mb_debug(f...) do { \
		printk(KERN_DEBUG f); \
		printk("\n"); \
	} while (0)
# define mb_assert(c) do { if (!(c)) \
		printk(KERN_ERR "assertion " #c " failed\n"); \
	} while(0)
#else
# define mb_debug(f...) do { } while(0)
# define mb_assert(c) do { } while(0)
#endif
#define mb_error(f...) do { \
		printk(KERN_ERR f); \
		printk("\n"); \
	} while(0)

#define MB_CACHE_WRITER ((unsigned short)~0U >> 1)
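
/*
 * e_used encodes an entry's handle state: each reader handle adds 1, and
 * a writer handle adds MB_CACHE_WRITER (0x7fff with 16-bit shorts) on top
 * of its own count, so e_used >= MB_CACHE_WRITER means a writer holds the
 * entry. mb_cache_entry_alloc() and mb_cache_entry_get() take the entry
 * exclusively (e_used = 1 + MB_CACHE_WRITER), while the find functions
 * take shared reader references and only wait for writers to finish.
 */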

static DECLARE_WAIT_QUEUE_HEAD(mb_cache_queue);

MODULE_AUTHOR("Andreas Gruenbacher <a.gruenbacher@computer.org>");
MODULE_DESCRIPTION("Meta block cache (for extended attributes)");
MODULE_LICENSE("GPL");

EXPORT_SYMBOL(mb_cache_create);
EXPORT_SYMBOL(mb_cache_shrink);
EXPORT_SYMBOL(mb_cache_destroy);
EXPORT_SYMBOL(mb_cache_entry_alloc);
EXPORT_SYMBOL(mb_cache_entry_insert);
EXPORT_SYMBOL(mb_cache_entry_release);
EXPORT_SYMBOL(mb_cache_entry_free);
EXPORT_SYMBOL(mb_cache_entry_get);
#if !defined(MB_CACHE_INDEXES_COUNT) || (MB_CACHE_INDEXES_COUNT > 0)
EXPORT_SYMBOL(mb_cache_entry_find_first);
EXPORT_SYMBOL(mb_cache_entry_find_next);
#endif

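/*
 * One mb_cache exists per user of the facility (the typical in-tree users
 * are the ext2/ext3/ext4 extended attribute implementations, one cache per
 * filesystem type). Entries hash into two tables: c_block_hash, keyed by
 * (device, block), and c_index_hash, keyed by the additional lookup key.
 */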
struct mb_cache {
	struct list_head		c_cache_list;
	const char			*c_name;
	atomic_t			c_entry_count;
	int				c_max_entries;
	int				c_bucket_bits;
	struct kmem_cache		*c_entry_cache;
	struct list_head		*c_block_hash;
	struct list_head		*c_index_hash;
};


/*
 * Global data: list of all mbcache's, lru list, and a spinlock for
 * accessing cache data structures on SMP machines. The lru list is
 * global across all mbcaches.
 */

static LIST_HEAD(mb_cache_list);
static LIST_HEAD(mb_cache_lru_list);
static DEFINE_SPINLOCK(mb_cache_spinlock);

/*
 * The shrinker that mbcache registers with the VM so that cache entries
 * can be reclaimed dynamically under memory pressure.
 */

static int mb_cache_shrink_fn(struct shrinker *shrink, int nr_to_scan,
			      gfp_t gfp_mask);

static struct shrinker mb_cache_shrinker = {
	.shrink = mb_cache_shrink_fn,
	.seeks = DEFAULT_SEEKS,
};

static inline int
__mb_cache_entry_is_hashed(struct mb_cache_entry *ce)
{
	return !list_empty(&ce->e_block_list);
}


static void
__mb_cache_entry_unhash(struct mb_cache_entry *ce)
{
	if (__mb_cache_entry_is_hashed(ce)) {
		list_del_init(&ce->e_block_list);
		list_del(&ce->e_index.o_list);
	}
}


static void
__mb_cache_entry_forget(struct mb_cache_entry *ce, gfp_t gfp_mask)
{
	struct mb_cache *cache = ce->e_cache;

	mb_assert(!(ce->e_used || ce->e_queued));
	kmem_cache_free(cache->c_entry_cache, ce);
	atomic_dec(&cache->c_entry_count);
}


static void
__mb_cache_entry_release_unlock(struct mb_cache_entry *ce)
	__releases(mb_cache_spinlock)
{
	/* Wake up all processes queuing for this cache entry. */
	if (ce->e_queued)
		wake_up_all(&mb_cache_queue);
	if (ce->e_used >= MB_CACHE_WRITER)
		ce->e_used -= MB_CACHE_WRITER;
	ce->e_used--;
	if (!(ce->e_used || ce->e_queued)) {
		if (!__mb_cache_entry_is_hashed(ce))
			goto forget;
		mb_assert(list_empty(&ce->e_lru_list));
		list_add_tail(&ce->e_lru_list, &mb_cache_lru_list);
	}
	spin_unlock(&mb_cache_spinlock);
	return;
forget:
	spin_unlock(&mb_cache_spinlock);
	__mb_cache_entry_forget(ce, GFP_KERNEL);
}


/*
 * mb_cache_shrink_fn()  memory pressure callback
 *
 * This function is called by the kernel memory management when memory
 * gets low.
 *
 * @shrink: (ignored)
 * @nr_to_scan: Number of objects to scan
 * @gfp_mask: (ignored)
 *
 * Returns the number of objects currently held across all caches, scaled
 * by sysctl_vfs_cache_pressure, so the VM can decide how hard to keep
 * shrinking.
 */
static int
mb_cache_shrink_fn(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask)
{
	LIST_HEAD(free_list);
	struct mb_cache *cache;
	struct mb_cache_entry *entry, *tmp;
	int count = 0;

	mb_debug("trying to free %d entries", nr_to_scan);
	spin_lock(&mb_cache_spinlock);
	while (nr_to_scan-- && !list_empty(&mb_cache_lru_list)) {
		struct mb_cache_entry *ce =
			list_entry(mb_cache_lru_list.next,
				   struct mb_cache_entry, e_lru_list);
		list_move_tail(&ce->e_lru_list, &free_list);
		__mb_cache_entry_unhash(ce);
	}
	list_for_each_entry(cache, &mb_cache_list, c_cache_list) {
		mb_debug("cache %s (%d)", cache->c_name,
			 atomic_read(&cache->c_entry_count));
		count += atomic_read(&cache->c_entry_count);
	}
	spin_unlock(&mb_cache_spinlock);
	list_for_each_entry_safe(entry, tmp, &free_list, e_lru_list) {
		__mb_cache_entry_forget(entry, gfp_mask);
	}
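	/*
	 * Shrinker convention: report the cache size scaled by
	 * sysctl_vfs_cache_pressure (100 means "reclaim in proportion
	 * to other caches such as the dcache").
	 */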
	return (count / 100) * sysctl_vfs_cache_pressure;
}


/*
 * mb_cache_create()  create a new cache
 *
 * All entries in one cache are of equal size. Cache entries may be from
 * multiple devices. The new cache takes part in the global shrinker that
 * is registered at module init time. Returns NULL if no more memory was
 * available.
 *
 * @name: name of the cache (informal)
 * @bucket_bits: log2(number of hash buckets)
 */
struct mb_cache *
mb_cache_create(const char *name, int bucket_bits)
{
	int n, bucket_count = 1 << bucket_bits;
	struct mb_cache *cache = NULL;

	cache = kmalloc(sizeof(struct mb_cache), GFP_KERNEL);
	if (!cache)
		return NULL;
	cache->c_name = name;
	atomic_set(&cache->c_entry_count, 0);
	cache->c_bucket_bits = bucket_bits;
	cache->c_block_hash = kmalloc(bucket_count * sizeof(struct list_head),
				      GFP_KERNEL);
	if (!cache->c_block_hash)
		goto fail;
	for (n = 0; n < bucket_count; n++)
		INIT_LIST_HEAD(&cache->c_block_hash[n]);
	cache->c_index_hash = kmalloc(bucket_count * sizeof(struct list_head),
				      GFP_KERNEL);
	if (!cache->c_index_hash)
		goto fail;
	for (n = 0; n < bucket_count; n++)
		INIT_LIST_HEAD(&cache->c_index_hash[n]);
	cache->c_entry_cache = kmem_cache_create(name,
		sizeof(struct mb_cache_entry), 0,
		SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD, NULL);
	if (!cache->c_entry_cache)
		goto fail2;

	/*
	 * Set an upper limit on the number of cache entries so that the hash
	 * chains won't grow too long.
	 */
	cache->c_max_entries = bucket_count << 4;

	spin_lock(&mb_cache_spinlock);
	list_add(&cache->c_cache_list, &mb_cache_list);
	spin_unlock(&mb_cache_spinlock);
	return cache;

fail2:
	kfree(cache->c_index_hash);

fail:
	kfree(cache->c_block_hash);
	kfree(cache);
	return NULL;
}


/*
 * mb_cache_shrink()
 *
 * Removes all cache entries of a device from the cache. Entries that are
 * currently in use cannot be freed and thus remain in the cache; all
 * other entries for that device are freed.
 *
 * @bdev: which device's cache entries to shrink
 */
void
mb_cache_shrink(struct block_device *bdev)
{
	LIST_HEAD(free_list);
	struct list_head *l, *ltmp;

	spin_lock(&mb_cache_spinlock);
	list_for_each_safe(l, ltmp, &mb_cache_lru_list) {
		struct mb_cache_entry *ce =
			list_entry(l, struct mb_cache_entry, e_lru_list);
		if (ce->e_bdev == bdev) {
			list_move_tail(&ce->e_lru_list, &free_list);
			__mb_cache_entry_unhash(ce);
		}
	}
	spin_unlock(&mb_cache_spinlock);
	list_for_each_safe(l, ltmp, &free_list) {
		__mb_cache_entry_forget(list_entry(l, struct mb_cache_entry,
						   e_lru_list), GFP_KERNEL);
	}
}


/*
 * mb_cache_destroy()
 *
 * Shrinks the cache to its minimum possible size (hopefully 0 entries),
 * and then destroys it. Entries that are still in use cannot be freed
 * and are reported as orphaned.
 */
void
mb_cache_destroy(struct mb_cache *cache)
{
	LIST_HEAD(free_list);
	struct list_head *l, *ltmp;

	spin_lock(&mb_cache_spinlock);
	list_for_each_safe(l, ltmp, &mb_cache_lru_list) {
		struct mb_cache_entry *ce =
			list_entry(l, struct mb_cache_entry, e_lru_list);
		if (ce->e_cache == cache) {
			list_move_tail(&ce->e_lru_list, &free_list);
			__mb_cache_entry_unhash(ce);
		}
	}
	list_del(&cache->c_cache_list);
	spin_unlock(&mb_cache_spinlock);

	list_for_each_safe(l, ltmp, &free_list) {
		__mb_cache_entry_forget(list_entry(l, struct mb_cache_entry,
						   e_lru_list), GFP_KERNEL);
	}

	if (atomic_read(&cache->c_entry_count) > 0) {
		mb_error("cache %s: %d orphaned entries",
			 cache->c_name,
			 atomic_read(&cache->c_entry_count));
	}

	kmem_cache_destroy(cache->c_entry_cache);

	kfree(cache->c_index_hash);
	kfree(cache->c_block_hash);
	kfree(cache);
}

/*
 * mb_cache_entry_alloc()
 *
 * Allocates a new cache entry. The new entry will not be valid initially,
 * and thus cannot be looked up yet. It should be filled with data, and
 * then inserted into the cache using mb_cache_entry_insert(). Returns NULL
 * if no more memory was available.
 */
struct mb_cache_entry *
mb_cache_entry_alloc(struct mb_cache *cache, gfp_t gfp_flags)
{
	struct mb_cache_entry *ce = NULL;

	if (atomic_read(&cache->c_entry_count) >= cache->c_max_entries) {
		/* The cache is full: try to reuse the least-recently used
		 * entry instead of growing the cache further. */
		spin_lock(&mb_cache_spinlock);
		if (!list_empty(&mb_cache_lru_list)) {
			ce = list_entry(mb_cache_lru_list.next,
					struct mb_cache_entry, e_lru_list);
			list_del_init(&ce->e_lru_list);
			__mb_cache_entry_unhash(ce);
		}
		spin_unlock(&mb_cache_spinlock);
	}
	if (!ce) {
		ce = kmem_cache_alloc(cache->c_entry_cache, gfp_flags);
		if (!ce)
			return NULL;
		atomic_inc(&cache->c_entry_count);
		INIT_LIST_HEAD(&ce->e_lru_list);
		INIT_LIST_HEAD(&ce->e_block_list);
		ce->e_cache = cache;
		ce->e_queued = 0;
	}
	/* The caller holds the new entry for exclusive access. */
	ce->e_used = 1 + MB_CACHE_WRITER;
	return ce;
}


/*
 * mb_cache_entry_insert()
 *
 * Inserts an entry that was allocated using mb_cache_entry_alloc() into
 * the cache. After this, the cache entry can be looked up, but is not yet
 * in the lru list as the caller still holds a handle to it. Returns 0 on
 * success, or -EBUSY if a cache entry for that device + block exists
 * already (this may happen after a failed lookup, if another process
 * has inserted the same cache entry in the meantime).
 *
 * @bdev: device the cache entry belongs to
 * @block: block number
 * @key: lookup key
 */
int
mb_cache_entry_insert(struct mb_cache_entry *ce, struct block_device *bdev,
		      sector_t block, unsigned int key)
{
	struct mb_cache *cache = ce->e_cache;
	unsigned int bucket;
	struct list_head *l;
	int error = -EBUSY;

	bucket = hash_long((unsigned long)bdev + (block & 0xffffffff),
			   cache->c_bucket_bits);
	spin_lock(&mb_cache_spinlock);
	list_for_each_prev(l, &cache->c_block_hash[bucket]) {
		struct mb_cache_entry *lce =
			list_entry(l, struct mb_cache_entry, e_block_list);
		if (lce->e_bdev == bdev && lce->e_block == block)
			goto out;
	}
	__mb_cache_entry_unhash(ce);
	ce->e_bdev = bdev;
	ce->e_block = block;
	list_add(&ce->e_block_list, &cache->c_block_hash[bucket]);
	ce->e_index.o_key = key;
	bucket = hash_long(key, cache->c_bucket_bits);
	list_add(&ce->e_index.o_list, &cache->c_index_hash[bucket]);
	error = 0;
out:
	spin_unlock(&mb_cache_spinlock);
	return error;
}


/*
 * mb_cache_entry_release()
 *
 * Release a handle to a cache entry. When the last handle to a cache entry
 * is released it is either freed (if it is invalid) or otherwise inserted
 * into the lru list.
 */
void
mb_cache_entry_release(struct mb_cache_entry *ce)
{
	spin_lock(&mb_cache_spinlock);
	__mb_cache_entry_release_unlock(ce);
}


/*
 * mb_cache_entry_free()
 *
 * Invalidates a cache entry (removes it from the hash tables) and then
 * releases the handle to it. The entry is freed as soon as the last
 * handle is dropped.
 */
void
mb_cache_entry_free(struct mb_cache_entry *ce)
{
	spin_lock(&mb_cache_spinlock);
	mb_assert(list_empty(&ce->e_lru_list));
	__mb_cache_entry_unhash(ce);
	__mb_cache_entry_release_unlock(ce);
}


/*
 * mb_cache_entry_get()
 *
 * Get a cache entry by device / block number. (There can only be one entry
 * in the cache per device and block.) Returns NULL if no such cache entry
 * exists. The returned cache entry is locked for exclusive access ("single
 * writer").
 */
struct mb_cache_entry *
mb_cache_entry_get(struct mb_cache *cache, struct block_device *bdev,
		   sector_t block)
{
	unsigned int bucket;
	struct list_head *l;
	struct mb_cache_entry *ce;

	bucket = hash_long((unsigned long)bdev + (block & 0xffffffff),
			   cache->c_bucket_bits);
	spin_lock(&mb_cache_spinlock);
	list_for_each(l, &cache->c_block_hash[bucket]) {
		ce = list_entry(l, struct mb_cache_entry, e_block_list);
		if (ce->e_bdev == bdev && ce->e_block == block) {
			DEFINE_WAIT(wait);

			if (!list_empty(&ce->e_lru_list))
				list_del_init(&ce->e_lru_list);

			/* Wait until all other handles have been dropped. */
			while (ce->e_used > 0) {
				ce->e_queued++;
				prepare_to_wait(&mb_cache_queue, &wait,
						TASK_UNINTERRUPTIBLE);
				spin_unlock(&mb_cache_spinlock);
				schedule();
				spin_lock(&mb_cache_spinlock);
				ce->e_queued--;
			}
			finish_wait(&mb_cache_queue, &wait);
			ce->e_used += 1 + MB_CACHE_WRITER;

			/* The entry may have been invalidated while we
			 * slept; if so, release it and report a miss. */
			if (!__mb_cache_entry_is_hashed(ce)) {
				__mb_cache_entry_release_unlock(ce);
				return NULL;
			}
			goto cleanup;
		}
	}
	ce = NULL;

cleanup:
	spin_unlock(&mb_cache_spinlock);
	return ce;
}
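
/*
 * Hedged usage sketch (illustrative only, compiled out): when a shared
 * metadata block is about to be freed or reused, its cache entry must be
 * invalidated. "example_forget_block" is a hypothetical name; the pattern
 * follows the extended attribute code that uses this cache.
 */
#if 0
static void example_forget_block(struct mb_cache *cache,
				 struct block_device *bdev, sector_t block)
{
	struct mb_cache_entry *ce;

	/* Returns with the entry held for exclusive access, or NULL. */
	ce = mb_cache_entry_get(cache, bdev, block);
	if (ce)
		mb_cache_entry_free(ce);	/* unhash and drop the handle */
}
#endif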

#if !defined(MB_CACHE_INDEXES_COUNT) || (MB_CACHE_INDEXES_COUNT > 0)

static struct mb_cache_entry *
__mb_cache_entry_find(struct list_head *l, struct list_head *head,
		      struct block_device *bdev, unsigned int key)
{
	while (l != head) {
		struct mb_cache_entry *ce =
			list_entry(l, struct mb_cache_entry, e_index.o_list);
		if (ce->e_bdev == bdev && ce->e_index.o_key == key) {
			DEFINE_WAIT(wait);

			if (!list_empty(&ce->e_lru_list))
				list_del_init(&ce->e_lru_list);

			/* Incrementing e_used before waiting gives readers
			   priority over writers. */
			ce->e_used++;
			while (ce->e_used >= MB_CACHE_WRITER) {
				ce->e_queued++;
				prepare_to_wait(&mb_cache_queue, &wait,
						TASK_UNINTERRUPTIBLE);
				spin_unlock(&mb_cache_spinlock);
				schedule();
				spin_lock(&mb_cache_spinlock);
				ce->e_queued--;
			}
			finish_wait(&mb_cache_queue, &wait);

			/* The entry may have been invalidated while we
			 * slept; let the caller restart the search. */
			if (!__mb_cache_entry_is_hashed(ce)) {
				__mb_cache_entry_release_unlock(ce);
				spin_lock(&mb_cache_spinlock);
				return ERR_PTR(-EAGAIN);
			}
			return ce;
		}
		l = l->next;
	}
	return NULL;
}


/*
 * mb_cache_entry_find_first()
 *
 * Find the first cache entry on a given device with a certain key in
 * an additional index. Additional matches can be found with
 * mb_cache_entry_find_next(). Returns NULL if no match was found. The
 * returned cache entry is locked for shared access ("multiple readers").
 *
 * @cache: the cache to search
 * @bdev: the device the cache entry should belong to
 * @key: the key in the index
 */
struct mb_cache_entry *
mb_cache_entry_find_first(struct mb_cache *cache, struct block_device *bdev,
			  unsigned int key)
{
	unsigned int bucket = hash_long(key, cache->c_bucket_bits);
	struct list_head *l;
	struct mb_cache_entry *ce;

	spin_lock(&mb_cache_spinlock);
	l = cache->c_index_hash[bucket].next;
	ce = __mb_cache_entry_find(l, &cache->c_index_hash[bucket], bdev, key);
	spin_unlock(&mb_cache_spinlock);
	return ce;
}


/*
 * mb_cache_entry_find_next()
 *
 * Find the next cache entry on a given device with a certain key in an
 * additional index. Returns NULL if no match could be found. The previous
 * entry is automatically released, so that mb_cache_entry_find_next() can
 * be called like this:
 *
 * entry = mb_cache_entry_find_first();
 * while (entry) {
 *	...
 *	entry = mb_cache_entry_find_next(entry, ...);
 * }
 *
 * @prev: The previous match
 * @bdev: the device the cache entry should belong to
 * @key: the key in the index
 */
struct mb_cache_entry *
mb_cache_entry_find_next(struct mb_cache_entry *prev,
			 struct block_device *bdev, unsigned int key)
{
	struct mb_cache *cache = prev->e_cache;
	unsigned int bucket = hash_long(key, cache->c_bucket_bits);
	struct list_head *l;
	struct mb_cache_entry *ce;

	spin_lock(&mb_cache_spinlock);
	l = prev->e_index.o_list.next;
	ce = __mb_cache_entry_find(l, &cache->c_index_hash[bucket], bdev, key);
	__mb_cache_entry_release_unlock(prev);
	return ce;
}

#endif /* !defined(MB_CACHE_INDEXES_COUNT) || (MB_CACHE_INDEXES_COUNT > 0) */

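/*
 * A hedged sketch of an index lookup loop (illustrative only, compiled
 * out). The find functions can return ERR_PTR(-EAGAIN) when an entry is
 * invalidated while the caller sleeps, so callers check IS_ERR() and
 * restart the search; "example_lookup" and "example_match" are
 * hypothetical names.
 */
#if 0
static struct mb_cache_entry *
example_lookup(struct mb_cache *cache, struct block_device *bdev,
	       unsigned int key)
{
	struct mb_cache_entry *ce;

again:
	ce = mb_cache_entry_find_first(cache, bdev, key);
	while (ce && !IS_ERR(ce)) {
		if (example_match(ce))
			return ce;	/* caller releases this handle */
		/* Releases `ce' and takes a handle on the next match. */
		ce = mb_cache_entry_find_next(ce, bdev, key);
	}
	if (PTR_ERR(ce) == -EAGAIN)
		goto again;
	return NULL;
}
#endif
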
static int __init init_mbcache(void)
{
	register_shrinker(&mb_cache_shrinker);
	return 0;
}

static void __exit exit_mbcache(void)
{
	unregister_shrinker(&mb_cache_shrinker);
}

module_init(init_mbcache)
module_exit(exit_mbcache)
