Root/
1 | #ifndef _LINUX_BLOCKGROUP_LOCK_H |
2 | #define _LINUX_BLOCKGROUP_LOCK_H |
3 | /* |
4 | * Per-blockgroup locking for ext2 and ext3. |
5 | * |
6 | * Simple hashed spinlocking. |
7 | */ |
8 | |
9 | #include <linux/spinlock.h> |
10 | #include <linux/cache.h> |
11 | |
#ifdef CONFIG_SMP

/*
 * NR_BG_LOCKS must be a power of two: bgl_lock_ptr() selects a lock by
 * masking the block group number with (NR_BG_LOCKS - 1).  Scale the table
 * size with the number of possible CPUs to reduce contention.
 */

#if NR_CPUS >= 32
#define NR_BG_LOCKS	128
#elif NR_CPUS >= 16
#define NR_BG_LOCKS	64
#elif NR_CPUS >= 8
#define NR_BG_LOCKS	32
#elif NR_CPUS >= 4
#define NR_BG_LOCKS	16
#elif NR_CPUS >= 2
#define NR_BG_LOCKS	8
#else
#define NR_BG_LOCKS	4
#endif

#else	/* CONFIG_SMP */
#define NR_BG_LOCKS	1
#endif	/* CONFIG_SMP */
35 | |
36 | struct bgl_lock { |
37 | spinlock_t lock; |
38 | } ____cacheline_aligned_in_smp; |
39 | |
40 | struct blockgroup_lock { |
41 | struct bgl_lock locks[NR_BG_LOCKS]; |
42 | }; |
43 | |
44 | static inline void bgl_lock_init(struct blockgroup_lock *bgl) |
45 | { |
46 | int i; |
47 | |
48 | for (i = 0; i < NR_BG_LOCKS; i++) |
49 | spin_lock_init(&bgl->locks[i].lock); |
50 | } |
51 | |
52 | /* |
53 | * The accessor is a macro so we can embed a blockgroup_lock into different |
54 | * superblock types |
55 | */ |
56 | static inline spinlock_t * |
57 | bgl_lock_ptr(struct blockgroup_lock *bgl, unsigned int block_group) |
58 | { |
59 | return &bgl->locks[(block_group) & (NR_BG_LOCKS-1)].lock; |
60 | } |
61 | |
62 | #endif |
63 |
Branches:
ben-wpan
ben-wpan-stefan
javiroman/ks7010
jz-2.6.34
jz-2.6.34-rc5
jz-2.6.34-rc6
jz-2.6.34-rc7
jz-2.6.35
jz-2.6.36
jz-2.6.37
jz-2.6.38
jz-2.6.39
jz-3.0
jz-3.1
jz-3.11
jz-3.12
jz-3.13
jz-3.15
jz-3.16
jz-3.18-dt
jz-3.2
jz-3.3
jz-3.4
jz-3.5
jz-3.6
jz-3.6-rc2-pwm
jz-3.9
jz-3.9-clk
jz-3.9-rc8
jz47xx
jz47xx-2.6.38
master
Tags:
od-2011-09-04
od-2011-09-18
v2.6.34-rc5
v2.6.34-rc6
v2.6.34-rc7
v3.9