#include <linux/atomic.h>
#include <linux/rwsem.h>
#include <linux/percpu.h>
#include <linux/wait.h>
#include <linux/lockdep.h>
#include <linux/percpu-rwsem.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/errno.h>

int __percpu_init_rwsem(struct percpu_rw_semaphore *brw,
			const char *name, struct lock_class_key *rwsem_key)
{
	brw->fast_read_ctr = alloc_percpu(int);
	if (unlikely(!brw->fast_read_ctr))
		return -ENOMEM;

	/* ->rw_sem represents the whole percpu_rw_semaphore for lockdep */
	__init_rwsem(&brw->rw_sem, name, rwsem_key);
	atomic_set(&brw->write_ctr, 0);
	atomic_set(&brw->slow_read_ctr, 0);
	init_waitqueue_head(&brw->write_waitq);
	return 0;
}

void percpu_free_rwsem(struct percpu_rw_semaphore *brw)
{
	free_percpu(brw->fast_read_ctr);
	brw->fast_read_ctr = NULL; /* catch use after free bugs */
}
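
/*
 * Minimal lifecycle sketch (illustrative only; "foo_rwsem" is a
 * hypothetical caller-side name, not part of this file). Callers
 * normally use the percpu_init_rwsem() wrapper from
 * <linux/percpu-rwsem.h>, which supplies the name and lock_class_key
 * arguments for __percpu_init_rwsem():
 *
 *	static struct percpu_rw_semaphore foo_rwsem;
 *
 *	if (percpu_init_rwsem(&foo_rwsem))
 *		return -ENOMEM;
 *	...
 *	percpu_free_rwsem(&foo_rwsem);
 *
 * percpu_free_rwsem() only frees ->fast_read_ctr; the caller must
 * ensure nobody can still use the semaphore afterwards.
 */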

/*
 * This is the fast-path for down_read/up_read; it only needs to ensure
 * there is no pending writer (atomic_read(write_ctr) == 0) and inc/dec the
 * fast per-cpu counter. The writer uses synchronize_sched_expedited() to
 * serialize with the preempt-disabled section below.
 *
 * The nontrivial part is that we should guarantee acquire/release semantics
 * in the cases where
 *
 *	R_W: down_write() comes after up_read(), the writer should see all
 *	     changes done by the reader
 * or
 *	W_R: down_read() comes after up_write(), the reader should see all
 *	     changes done by the writer
 *
 * If this helper fails the callers rely on the normal rw_semaphore and
 * atomic_dec_and_test(), so in this case we have the necessary barriers.
 *
 * But if it succeeds we do not have any barriers; atomic_read(write_ctr) or
 * __this_cpu_add() below can be reordered with any LOAD/STORE done by the
 * reader inside the critical section. See the comments in down_write and
 * up_write below.
 */
static bool update_fast_ctr(struct percpu_rw_semaphore *brw, unsigned int val)
{
	bool success = false;

	preempt_disable();
	if (likely(!atomic_read(&brw->write_ctr))) {
		__this_cpu_add(*brw->fast_read_ctr, val);
		success = true;
	}
	preempt_enable();

	return success;
}
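
/*
 * Illustration of the R_W case above (a hypothetical interleaving, not
 * an additional guarantee beyond what the comments in this file state):
 *
 *	reader on CPU 0				writer on CPU 1
 *	---------------				---------------
 *	STOREs in the critical section
 *	percpu_up_read():
 *	  update_fast_ctr() succeeds,
 *	  no barrier is executed
 *						percpu_down_write():
 *						  atomic_inc(&write_ctr)
 *						  synchronize_sched_expedited()
 *						  LOADs of the reader's data
 *
 * The fast-path itself is barrier-free, so it is only the
 * synchronize_sched_expedited() in percpu_down_write() that makes the
 * reader's STOREs visible before the writer's LOADs.
 */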

/*
 * Like the normal down_read() this is not recursive; the writer can
 * come after the first percpu_down_read() and create a deadlock.
 *
 * Note: returns with lock_is_held(brw->rw_sem) == T for lockdep,
 * percpu_up_read() does rwsem_release(). This pairs with the usage
 * of ->rw_sem in percpu_down/up_write().
 */
void percpu_down_read(struct percpu_rw_semaphore *brw)
{
	might_sleep();
	if (likely(update_fast_ctr(brw, +1))) {
		rwsem_acquire_read(&brw->rw_sem.dep_map, 0, 0, _RET_IP_);
		return;
	}

	down_read(&brw->rw_sem);
	atomic_inc(&brw->slow_read_ctr);
	/* avoid up_read()->rwsem_release() */
	__up_read(&brw->rw_sem);
}

void percpu_up_read(struct percpu_rw_semaphore *brw)
{
	rwsem_release(&brw->rw_sem.dep_map, 1, _RET_IP_);

	if (likely(update_fast_ctr(brw, -1)))
		return;

	/* false-positive is possible but harmless */
	if (atomic_dec_and_test(&brw->slow_read_ctr))
		wake_up_all(&brw->write_waitq);
}
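
/*
 * Reader-side usage sketch (hypothetical caller; foo_rwsem and the
 * protected data are illustrative):
 *
 *	percpu_down_read(&foo_rwsem);
 *	... read-side critical section, may sleep, must not recurse ...
 *	percpu_up_read(&foo_rwsem);
 *
 * In the common case both calls take the fast-path and cost only a
 * preempt_disable/enable pair plus a per-cpu add; ->rw_sem is taken
 * only while a writer is pending.
 */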

static int clear_fast_ctr(struct percpu_rw_semaphore *brw)
{
	unsigned int sum = 0;
	int cpu;

	for_each_possible_cpu(cpu) {
		sum += per_cpu(*brw->fast_read_ctr, cpu);
		per_cpu(*brw->fast_read_ctr, cpu) = 0;
	}

	return sum;
}
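
/*
 * A worked example of why the sum above is unsigned (the numbers are
 * illustrative): a reader can migrate between percpu_down_read() and
 * percpu_up_read(), so a single CPU's counter can go negative, e.g.
 * +1 on CPU 0 and -1 on CPU 1. Summed as unsigned int,
 *
 *	0x00000001 + 0xffffffff == 0x00000000
 *
 * the wraparound cancels the per-cpu imbalance and yields the true
 * number of active fast-path readers (0 here), with no reliance on
 * signed-overflow behaviour.
 */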

/*
 * A writer increments ->write_ctr to force the readers to switch to the
 * slow mode; note the atomic_read() check in update_fast_ctr().
 *
 * After that the readers can only inc/dec the slow ->slow_read_ctr counter;
 * ->fast_read_ctr is stable. Once the writer moves its sum into the slow
 * counter it represents the number of active readers.
 *
 * Finally the writer takes ->rw_sem for writing and blocks the new readers,
 * then waits until the slow counter becomes zero.
 */
void percpu_down_write(struct percpu_rw_semaphore *brw)
{
	/* tell update_fast_ctr() there is a pending writer */
	atomic_inc(&brw->write_ctr);
	/*
	 * 1. Ensures that write_ctr != 0 is visible to any down_read/up_read
	 *    so that update_fast_ctr() can't succeed.
	 *
	 * 2. Ensures we see the result of every previous this_cpu_add() in
	 *    update_fast_ctr().
	 *
	 * 3. Ensures that if any reader has exited its critical section via
	 *    the fast-path, it executes a full memory barrier before we
	 *    return. See the R_W case in the comment above update_fast_ctr().
	 */
	synchronize_sched_expedited();

	/* exclude other writers, and block the new readers completely */
	down_write(&brw->rw_sem);

	/* nobody can use fast_read_ctr, move its sum into slow_read_ctr */
	atomic_add(clear_fast_ctr(brw), &brw->slow_read_ctr);

	/* wait for all readers to complete their percpu_up_read() */
	wait_event(brw->write_waitq, !atomic_read(&brw->slow_read_ctr));
}

void percpu_up_write(struct percpu_rw_semaphore *brw)
{
	/* release the lock, but the readers can't use the fast-path */
	up_write(&brw->rw_sem);
	/*
	 * Insert the barrier before the next fast-path in down_read,
	 * see W_R case in the comment above update_fast_ctr().
	 */
	synchronize_sched_expedited();
	/* the last writer unblocks update_fast_ctr() */
	atomic_dec(&brw->write_ctr);
}
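
/*
 * Writer-side usage sketch (hypothetical caller; foo_rwsem as in the
 * sketches above):
 *
 *	percpu_down_write(&foo_rwsem);
 *	... all readers are excluded, modify the protected data ...
 *	percpu_up_write(&foo_rwsem);
 *
 * Writers are heavyweight: every down/up pair pays two
 * synchronize_sched_expedited() calls, so this lock suits read-mostly
 * data where writes are rare.
 */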